Sat, 03 Jan 2015 20:18:00 +0100
Conditionally enable double-key logic based on private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when double-key logic is disabled.
michael@0 | 1 | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- |
michael@0 | 2 | * vim: set ts=8 sts=4 et sw=4 tw=99: |
michael@0 | 3 | * This Source Code Form is subject to the terms of the Mozilla Public |
michael@0 | 4 | * License, v. 2.0. If a copy of the MPL was not distributed with this |
michael@0 | 5 | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
michael@0 | 6 | |
michael@0 | 7 | #include "jit/arm/Assembler-arm.h" |
michael@0 | 8 | |
michael@0 | 9 | #include "mozilla/DebugOnly.h" |
michael@0 | 10 | #include "mozilla/MathAlgorithms.h" |
michael@0 | 11 | |
michael@0 | 12 | #include "jscompartment.h" |
michael@0 | 13 | #include "jsutil.h" |
michael@0 | 14 | |
michael@0 | 15 | #include "assembler/jit/ExecutableAllocator.h" |
michael@0 | 16 | #include "gc/Marking.h" |
michael@0 | 17 | #include "jit/arm/MacroAssembler-arm.h" |
michael@0 | 18 | #include "jit/JitCompartment.h" |
michael@0 | 19 | |
michael@0 | 20 | using namespace js; |
michael@0 | 21 | using namespace js::jit; |
michael@0 | 22 | |
michael@0 | 23 | using mozilla::CountLeadingZeroes32; |
michael@0 | 24 | |
michael@0 | 25 | // Note this is used for inter-AsmJS calls and may pass arguments and results |
michael@0 | 26 | // in floating point registers even if the system ABI does not. |
michael@0 | 27 | ABIArgGenerator::ABIArgGenerator() : |
michael@0 | 28 | intRegIndex_(0), |
michael@0 | 29 | floatRegIndex_(0), |
michael@0 | 30 | stackOffset_(0), |
michael@0 | 31 | current_() |
michael@0 | 32 | {} |
michael@0 | 33 | |
michael@0 | 34 | ABIArg |
michael@0 | 35 | ABIArgGenerator::next(MIRType type) |
michael@0 | 36 | { |
michael@0 | 37 | switch (type) { |
michael@0 | 38 | case MIRType_Int32: |
michael@0 | 39 | case MIRType_Pointer: |
michael@0 | 40 | if (intRegIndex_ == NumIntArgRegs) { |
michael@0 | 41 | current_ = ABIArg(stackOffset_); |
michael@0 | 42 | stackOffset_ += sizeof(uint32_t); |
michael@0 | 43 | break; |
michael@0 | 44 | } |
michael@0 | 45 | current_ = ABIArg(Register::FromCode(intRegIndex_)); |
michael@0 | 46 | intRegIndex_++; |
michael@0 | 47 | break; |
michael@0 | 48 | case MIRType_Float32: |
michael@0 | 49 | case MIRType_Double: |
michael@0 | 50 | if (floatRegIndex_ == NumFloatArgRegs) { |
michael@0 | 51 | static const int align = sizeof(double) - 1; |
michael@0 | 52 | stackOffset_ = (stackOffset_ + align) & ~align; |
michael@0 | 53 | current_ = ABIArg(stackOffset_); |
michael@0 | 54 | stackOffset_ += sizeof(uint64_t); |
michael@0 | 55 | break; |
michael@0 | 56 | } |
michael@0 | 57 | current_ = ABIArg(FloatRegister::FromCode(floatRegIndex_)); |
michael@0 | 58 | floatRegIndex_++; |
michael@0 | 59 | break; |
michael@0 | 60 | default: |
michael@0 | 61 | MOZ_ASSUME_UNREACHABLE("Unexpected argument type"); |
michael@0 | 62 | } |
michael@0 | 63 | |
michael@0 | 64 | return current_; |
michael@0 | 65 | } |
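// A rough sketch of how the generator above assigns locations, assuming
// NumIntArgRegs == 4 and that FloatRegister::FromCode(0) names d0 (a
// hypothetical call sequence, purely for illustration):
//
//   next(MIRType_Int32)   -> r0
//   next(MIRType_Double)  -> d0
//   next(MIRType_Int32)   -> r1
//   next(MIRType_Int32)   -> r2
//   next(MIRType_Int32)   -> r3
//   next(MIRType_Int32)   -> stack offset 0   (int registers exhausted)
//
// Doubles spill to the stack only once the VFP argument registers run out,
// and the code above 8-byte aligns the stack offset when they do.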
michael@0 | 66 | const Register ABIArgGenerator::NonArgReturnVolatileReg0 = r4; |
michael@0 | 67 | const Register ABIArgGenerator::NonArgReturnVolatileReg1 = r5; |
michael@0 | 68 | |
michael@0 | 69 | // Encode a standard register when it is being used as src1, the dest, and |
michael@0 | 70 | // an extra register. These should never be called with an InvalidReg. |
michael@0 | 71 | uint32_t |
michael@0 | 72 | js::jit::RT(Register r) |
michael@0 | 73 | { |
michael@0 | 74 | JS_ASSERT((r.code() & ~0xf) == 0); |
michael@0 | 75 | return r.code() << 12; |
michael@0 | 76 | } |
michael@0 | 77 | |
michael@0 | 78 | uint32_t |
michael@0 | 79 | js::jit::RN(Register r) |
michael@0 | 80 | { |
michael@0 | 81 | JS_ASSERT((r.code() & ~0xf) == 0); |
michael@0 | 82 | return r.code() << 16; |
michael@0 | 83 | } |
michael@0 | 84 | |
michael@0 | 85 | uint32_t |
michael@0 | 86 | js::jit::RD(Register r) |
michael@0 | 87 | { |
michael@0 | 88 | JS_ASSERT((r.code() & ~0xf) == 0); |
michael@0 | 89 | return r.code() << 12; |
michael@0 | 90 | } |
michael@0 | 91 | |
michael@0 | 92 | uint32_t |
michael@0 | 93 | js::jit::RM(Register r) |
michael@0 | 94 | { |
michael@0 | 95 | JS_ASSERT((r.code() & ~0xf) == 0); |
michael@0 | 96 | return r.code() << 8; |
michael@0 | 97 | } |
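// A quick reference for the field positions used by the encoders above
// (standard ARM field layout; the low nibble, bits 3..0, is filled in by the
// operand-2 encoding rather than by these helpers):
//
//   RT(r), RD(r) -> bits 15..12
//   RN(r)        -> bits 19..16
//   RM(r)        -> bits 11..8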
michael@0 | 98 | |
michael@0 | 99 | // Encode a standard register when it is being used as src1, the dest, and |
michael@0 | 100 | // an extra register. For these, an InvalidReg is used to indicate an optional |
michael@0 | 101 | // register that has been omitted. |
michael@0 | 102 | uint32_t |
michael@0 | 103 | js::jit::maybeRT(Register r) |
michael@0 | 104 | { |
michael@0 | 105 | if (r == InvalidReg) |
michael@0 | 106 | return 0; |
michael@0 | 107 | |
michael@0 | 108 | JS_ASSERT((r.code() & ~0xf) == 0); |
michael@0 | 109 | return r.code() << 12; |
michael@0 | 110 | } |
michael@0 | 111 | |
michael@0 | 112 | uint32_t |
michael@0 | 113 | js::jit::maybeRN(Register r) |
michael@0 | 114 | { |
michael@0 | 115 | if (r == InvalidReg) |
michael@0 | 116 | return 0; |
michael@0 | 117 | |
michael@0 | 118 | JS_ASSERT((r.code() & ~0xf) == 0); |
michael@0 | 119 | return r.code() << 16; |
michael@0 | 120 | } |
michael@0 | 121 | |
michael@0 | 122 | uint32_t |
michael@0 | 123 | js::jit::maybeRD(Register r) |
michael@0 | 124 | { |
michael@0 | 125 | if (r == InvalidReg) |
michael@0 | 126 | return 0; |
michael@0 | 127 | |
michael@0 | 128 | JS_ASSERT((r.code() & ~0xf) == 0); |
michael@0 | 129 | return r.code() << 12; |
michael@0 | 130 | } |
michael@0 | 131 | |
michael@0 | 132 | Register |
michael@0 | 133 | js::jit::toRD(Instruction &i) |
michael@0 | 134 | { |
michael@0 | 135 | return Register::FromCode((i.encode()>>12) & 0xf); |
michael@0 | 136 | } |
michael@0 | 137 | Register |
michael@0 | 138 | js::jit::toR(Instruction &i) |
michael@0 | 139 | { |
michael@0 | 140 | return Register::FromCode(i.encode() & 0xf); |
michael@0 | 141 | } |
michael@0 | 142 | |
michael@0 | 143 | Register |
michael@0 | 144 | js::jit::toRM(Instruction &i) |
michael@0 | 145 | { |
michael@0 | 146 | return Register::FromCode((i.encode()>>8) & 0xf); |
michael@0 | 147 | } |
michael@0 | 148 | |
michael@0 | 149 | Register |
michael@0 | 150 | js::jit::toRN(Instruction &i) |
michael@0 | 151 | { |
michael@0 | 152 | return Register::FromCode((i.encode()>>16) & 0xf); |
michael@0 | 153 | } |
michael@0 | 154 | |
michael@0 | 155 | uint32_t |
michael@0 | 156 | js::jit::VD(VFPRegister vr) |
michael@0 | 157 | { |
michael@0 | 158 | if (vr.isMissing()) |
michael@0 | 159 | return 0; |
michael@0 | 160 | |
michael@0 | 161 | // Bits 15,14,13,12, 22 |
michael@0 | 162 | VFPRegister::VFPRegIndexSplit s = vr.encode(); |
michael@0 | 163 | return s.bit << 22 | s.block << 12; |
michael@0 | 164 | } |
michael@0 | 165 | uint32_t |
michael@0 | 166 | js::jit::VN(VFPRegister vr) |
michael@0 | 167 | { |
michael@0 | 168 | if (vr.isMissing()) |
michael@0 | 169 | return 0; |
michael@0 | 170 | |
michael@0 | 171 | // bits 19,18,17,16, 7 |
michael@0 | 172 | VFPRegister::VFPRegIndexSplit s = vr.encode(); |
michael@0 | 173 | return s.bit << 7 | s.block << 16; |
michael@0 | 174 | } |
michael@0 | 175 | uint32_t |
michael@0 | 176 | js::jit::VM(VFPRegister vr) |
michael@0 | 177 | { |
michael@0 | 178 | if (vr.isMissing()) |
michael@0 | 179 | return 0; |
michael@0 | 180 | |
michael@0 | 181 | // bits 5, 3,2,1,0 |
michael@0 | 182 | VFPRegister::VFPRegIndexSplit s = vr.encode(); |
michael@0 | 183 | return s.bit << 5 | s.block; |
michael@0 | 184 | } |
michael@0 | 185 | |
michael@0 | 186 | VFPRegister::VFPRegIndexSplit |
michael@0 | 187 | jit::VFPRegister::encode() |
michael@0 | 188 | { |
michael@0 | 189 | JS_ASSERT(!_isInvalid); |
michael@0 | 190 | |
michael@0 | 191 | switch (kind) { |
michael@0 | 192 | case Double: |
michael@0 | 193 | return VFPRegIndexSplit(_code & 0xf, _code >> 4); |
michael@0 | 194 | case Single: |
michael@0 | 195 | return VFPRegIndexSplit(_code >> 1, _code & 1); |
michael@0 | 196 | default: |
michael@0 | 197 | // VFP register treated as an integer, NOT a GPR. |
michael@0 | 198 | return VFPRegIndexSplit(_code >> 1, _code & 1); |
michael@0 | 199 | } |
michael@0 | 200 | } |
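// A worked example of the split produced by encode() and consumed by VD/VN/VM
// above (assuming the usual ARM VFP numbering):
//
//   d5 -> block = 5, bit = 0, so VD(d5) == (0 << 22) | (5 << 12)
//   s3 -> block = 1, bit = 1, so VD(s3) == (1 << 22) | (1 << 12)
//
// For doubles the extension bit comes from the high bit of the register
// number; for singles it comes from the low bit.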
michael@0 | 201 | |
michael@0 | 202 | VFPRegister js::jit::NoVFPRegister(true); |
michael@0 | 203 | |
michael@0 | 204 | bool |
michael@0 | 205 | InstDTR::isTHIS(const Instruction &i) |
michael@0 | 206 | { |
michael@0 | 207 | return (i.encode() & IsDTRMask) == (uint32_t)IsDTR; |
michael@0 | 208 | } |
michael@0 | 209 | |
michael@0 | 210 | InstDTR * |
michael@0 | 211 | InstDTR::asTHIS(const Instruction &i) |
michael@0 | 212 | { |
michael@0 | 213 | if (isTHIS(i)) |
michael@0 | 214 | return (InstDTR*)&i; |
michael@0 | 215 | return nullptr; |
michael@0 | 216 | } |
michael@0 | 217 | |
michael@0 | 218 | bool |
michael@0 | 219 | InstLDR::isTHIS(const Instruction &i) |
michael@0 | 220 | { |
michael@0 | 221 | return (i.encode() & IsDTRMask) == (uint32_t)IsDTR; |
michael@0 | 222 | } |
michael@0 | 223 | |
michael@0 | 224 | InstLDR * |
michael@0 | 225 | InstLDR::asTHIS(const Instruction &i) |
michael@0 | 226 | { |
michael@0 | 227 | if (isTHIS(i)) |
michael@0 | 228 | return (InstLDR*)&i; |
michael@0 | 229 | return nullptr; |
michael@0 | 230 | } |
michael@0 | 231 | |
michael@0 | 232 | InstNOP * |
michael@0 | 233 | InstNOP::asTHIS(Instruction &i) |
michael@0 | 234 | { |
michael@0 | 235 | if (isTHIS(i)) |
michael@0 | 236 | return (InstNOP*) (&i); |
michael@0 | 237 | return nullptr; |
michael@0 | 238 | } |
michael@0 | 239 | |
michael@0 | 240 | bool |
michael@0 | 241 | InstNOP::isTHIS(const Instruction &i) |
michael@0 | 242 | { |
michael@0 | 243 | return (i.encode() & 0x0fffffff) == NopInst; |
michael@0 | 244 | } |
michael@0 | 245 | |
michael@0 | 246 | bool |
michael@0 | 247 | InstBranchReg::isTHIS(const Instruction &i) |
michael@0 | 248 | { |
michael@0 | 249 | return InstBXReg::isTHIS(i) || InstBLXReg::isTHIS(i); |
michael@0 | 250 | } |
michael@0 | 251 | |
michael@0 | 252 | InstBranchReg * |
michael@0 | 253 | InstBranchReg::asTHIS(const Instruction &i) |
michael@0 | 254 | { |
michael@0 | 255 | if (isTHIS(i)) |
michael@0 | 256 | return (InstBranchReg*)&i; |
michael@0 | 257 | return nullptr; |
michael@0 | 258 | } |
michael@0 | 259 | void |
michael@0 | 260 | InstBranchReg::extractDest(Register *dest) |
michael@0 | 261 | { |
michael@0 | 262 | *dest = toR(*this); |
michael@0 | 263 | } |
michael@0 | 264 | bool |
michael@0 | 265 | InstBranchReg::checkDest(Register dest) |
michael@0 | 266 | { |
michael@0 | 267 | return dest == toR(*this); |
michael@0 | 268 | } |
michael@0 | 269 | |
michael@0 | 270 | bool |
michael@0 | 271 | InstBranchImm::isTHIS(const Instruction &i) |
michael@0 | 272 | { |
michael@0 | 273 | return InstBImm::isTHIS(i) || InstBLImm::isTHIS(i); |
michael@0 | 274 | } |
michael@0 | 275 | |
michael@0 | 276 | InstBranchImm * |
michael@0 | 277 | InstBranchImm::asTHIS(const Instruction &i) |
michael@0 | 278 | { |
michael@0 | 279 | if (isTHIS(i)) |
michael@0 | 280 | return (InstBranchImm*)&i; |
michael@0 | 281 | return nullptr; |
michael@0 | 282 | } |
michael@0 | 283 | |
michael@0 | 284 | void |
michael@0 | 285 | InstBranchImm::extractImm(BOffImm *dest) |
michael@0 | 286 | { |
michael@0 | 287 | *dest = BOffImm(*this); |
michael@0 | 288 | } |
michael@0 | 289 | |
michael@0 | 290 | bool |
michael@0 | 291 | InstBXReg::isTHIS(const Instruction &i) |
michael@0 | 292 | { |
michael@0 | 293 | return (i.encode() & IsBRegMask) == IsBX; |
michael@0 | 294 | } |
michael@0 | 295 | |
michael@0 | 296 | InstBXReg * |
michael@0 | 297 | InstBXReg::asTHIS(const Instruction &i) |
michael@0 | 298 | { |
michael@0 | 299 | if (isTHIS(i)) |
michael@0 | 300 | return (InstBXReg*)&i; |
michael@0 | 301 | return nullptr; |
michael@0 | 302 | } |
michael@0 | 303 | |
michael@0 | 304 | bool |
michael@0 | 305 | InstBLXReg::isTHIS(const Instruction &i) |
michael@0 | 306 | { |
michael@0 | 307 | return (i.encode() & IsBRegMask) == IsBLX; |
michael@0 | 308 | |
michael@0 | 309 | } |
michael@0 | 310 | InstBLXReg * |
michael@0 | 311 | InstBLXReg::asTHIS(const Instruction &i) |
michael@0 | 312 | { |
michael@0 | 313 | if (isTHIS(i)) |
michael@0 | 314 | return (InstBLXReg*)&i; |
michael@0 | 315 | return nullptr; |
michael@0 | 316 | } |
michael@0 | 317 | |
michael@0 | 318 | bool |
michael@0 | 319 | InstBImm::isTHIS(const Instruction &i) |
michael@0 | 320 | { |
michael@0 | 321 | return (i.encode () & IsBImmMask) == IsB; |
michael@0 | 322 | } |
michael@0 | 323 | InstBImm * |
michael@0 | 324 | InstBImm::asTHIS(const Instruction &i) |
michael@0 | 325 | { |
michael@0 | 326 | if (isTHIS(i)) |
michael@0 | 327 | return (InstBImm*)&i; |
michael@0 | 328 | return nullptr; |
michael@0 | 329 | } |
michael@0 | 330 | |
michael@0 | 331 | bool |
michael@0 | 332 | InstBLImm::isTHIS(const Instruction &i) |
michael@0 | 333 | { |
michael@0 | 334 | return (i.encode () & IsBImmMask) == IsBL; |
michael@0 | 335 | |
michael@0 | 336 | } |
michael@0 | 337 | InstBLImm * |
michael@0 | 338 | InstBLImm::asTHIS(Instruction &i) |
michael@0 | 339 | { |
michael@0 | 340 | if (isTHIS(i)) |
michael@0 | 341 | return (InstBLImm*)&i; |
michael@0 | 342 | return nullptr; |
michael@0 | 343 | } |
michael@0 | 344 | |
michael@0 | 345 | bool |
michael@0 | 346 | InstMovWT::isTHIS(Instruction &i) |
michael@0 | 347 | { |
michael@0 | 348 | return InstMovW::isTHIS(i) || InstMovT::isTHIS(i); |
michael@0 | 349 | } |
michael@0 | 350 | InstMovWT * |
michael@0 | 351 | InstMovWT::asTHIS(Instruction &i) |
michael@0 | 352 | { |
michael@0 | 353 | if (isTHIS(i)) |
michael@0 | 354 | return (InstMovWT*)&i; |
michael@0 | 355 | return nullptr; |
michael@0 | 356 | } |
michael@0 | 357 | |
michael@0 | 358 | void |
michael@0 | 359 | InstMovWT::extractImm(Imm16 *imm) |
michael@0 | 360 | { |
michael@0 | 361 | *imm = Imm16(*this); |
michael@0 | 362 | } |
michael@0 | 363 | bool |
michael@0 | 364 | InstMovWT::checkImm(Imm16 imm) |
michael@0 | 365 | { |
michael@0 | 366 | return imm.decode() == Imm16(*this).decode(); |
michael@0 | 367 | } |
michael@0 | 368 | |
michael@0 | 369 | void |
michael@0 | 370 | InstMovWT::extractDest(Register *dest) |
michael@0 | 371 | { |
michael@0 | 372 | *dest = toRD(*this); |
michael@0 | 373 | } |
michael@0 | 374 | bool |
michael@0 | 375 | InstMovWT::checkDest(Register dest) |
michael@0 | 376 | { |
michael@0 | 377 | return dest == toRD(*this); |
michael@0 | 378 | } |
michael@0 | 379 | |
michael@0 | 380 | bool |
michael@0 | 381 | InstMovW::isTHIS(const Instruction &i) |
michael@0 | 382 | { |
michael@0 | 383 | return (i.encode() & IsWTMask) == IsW; |
michael@0 | 384 | } |
michael@0 | 385 | |
michael@0 | 386 | InstMovW * |
michael@0 | 387 | InstMovW::asTHIS(const Instruction &i) |
michael@0 | 388 | { |
michael@0 | 389 | if (isTHIS(i)) |
michael@0 | 390 | return (InstMovW*) (&i); |
michael@0 | 391 | return nullptr; |
michael@0 | 392 | } |
michael@0 | 393 | InstMovT * |
michael@0 | 394 | InstMovT::asTHIS(const Instruction &i) |
michael@0 | 395 | { |
michael@0 | 396 | if (isTHIS(i)) |
michael@0 | 397 | return (InstMovT*) (&i); |
michael@0 | 398 | return nullptr; |
michael@0 | 399 | } |
michael@0 | 400 | |
michael@0 | 401 | bool |
michael@0 | 402 | InstMovT::isTHIS(const Instruction &i) |
michael@0 | 403 | { |
michael@0 | 404 | return (i.encode() & IsWTMask) == IsT; |
michael@0 | 405 | } |
michael@0 | 406 | |
michael@0 | 407 | InstALU * |
michael@0 | 408 | InstALU::asTHIS(const Instruction &i) |
michael@0 | 409 | { |
michael@0 | 410 | if (isTHIS(i)) |
michael@0 | 411 | return (InstALU*) (&i); |
michael@0 | 412 | return nullptr; |
michael@0 | 413 | } |
michael@0 | 414 | bool |
michael@0 | 415 | InstALU::isTHIS(const Instruction &i) |
michael@0 | 416 | { |
michael@0 | 417 | return (i.encode() & ALUMask) == 0; |
michael@0 | 418 | } |
michael@0 | 419 | void |
michael@0 | 420 | InstALU::extractOp(ALUOp *ret) |
michael@0 | 421 | { |
michael@0 | 422 | *ret = ALUOp(encode() & (0xf << 21)); |
michael@0 | 423 | } |
michael@0 | 424 | bool |
michael@0 | 425 | InstALU::checkOp(ALUOp op) |
michael@0 | 426 | { |
michael@0 | 427 | ALUOp mine; |
michael@0 | 428 | extractOp(&mine); |
michael@0 | 429 | return mine == op; |
michael@0 | 430 | } |
michael@0 | 431 | void |
michael@0 | 432 | InstALU::extractDest(Register *ret) |
michael@0 | 433 | { |
michael@0 | 434 | *ret = toRD(*this); |
michael@0 | 435 | } |
michael@0 | 436 | bool |
michael@0 | 437 | InstALU::checkDest(Register rd) |
michael@0 | 438 | { |
michael@0 | 439 | return rd == toRD(*this); |
michael@0 | 440 | } |
michael@0 | 441 | void |
michael@0 | 442 | InstALU::extractOp1(Register *ret) |
michael@0 | 443 | { |
michael@0 | 444 | *ret = toRN(*this); |
michael@0 | 445 | } |
michael@0 | 446 | bool |
michael@0 | 447 | InstALU::checkOp1(Register rn) |
michael@0 | 448 | { |
michael@0 | 449 | return rn == toRN(*this); |
michael@0 | 450 | } |
michael@0 | 451 | Operand2 |
michael@0 | 452 | InstALU::extractOp2() |
michael@0 | 453 | { |
michael@0 | 454 | return Operand2(encode()); |
michael@0 | 455 | } |
michael@0 | 456 | |
michael@0 | 457 | InstCMP * |
michael@0 | 458 | InstCMP::asTHIS(const Instruction &i) |
michael@0 | 459 | { |
michael@0 | 460 | if (isTHIS(i)) |
michael@0 | 461 | return (InstCMP*) (&i); |
michael@0 | 462 | return nullptr; |
michael@0 | 463 | } |
michael@0 | 464 | |
michael@0 | 465 | bool |
michael@0 | 466 | InstCMP::isTHIS(const Instruction &i) |
michael@0 | 467 | { |
michael@0 | 468 | return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkDest(r0) && InstALU::asTHIS(i)->checkOp(op_cmp); |
michael@0 | 469 | } |
michael@0 | 470 | |
michael@0 | 471 | InstMOV * |
michael@0 | 472 | InstMOV::asTHIS(const Instruction &i) |
michael@0 | 473 | { |
michael@0 | 474 | if (isTHIS(i)) |
michael@0 | 475 | return (InstMOV*) (&i); |
michael@0 | 476 | return nullptr; |
michael@0 | 477 | } |
michael@0 | 478 | |
michael@0 | 479 | bool |
michael@0 | 480 | InstMOV::isTHIS(const Instruction &i) |
michael@0 | 481 | { |
michael@0 | 482 | return InstALU::isTHIS(i) && InstALU::asTHIS(i)->checkOp1(r0) && InstALU::asTHIS(i)->checkOp(op_mov); |
michael@0 | 483 | } |
michael@0 | 484 | |
michael@0 | 485 | Op2Reg |
michael@0 | 486 | Operand2::toOp2Reg() { |
michael@0 | 487 | return *(Op2Reg*)this; |
michael@0 | 488 | } |
michael@0 | 489 | O2RegImmShift |
michael@0 | 490 | Op2Reg::toO2RegImmShift() { |
michael@0 | 491 | return *(O2RegImmShift*)this; |
michael@0 | 492 | } |
michael@0 | 493 | O2RegRegShift |
michael@0 | 494 | Op2Reg::toO2RegRegShift() { |
michael@0 | 495 | return *(O2RegRegShift*)this; |
michael@0 | 496 | } |
michael@0 | 497 | |
michael@0 | 498 | Imm16::Imm16(Instruction &inst) |
michael@0 | 499 | : lower(inst.encode() & 0xfff), |
michael@0 | 500 | upper(inst.encode() >> 16), |
michael@0 | 501 | invalid(0xfff) |
michael@0 | 502 | { } |
michael@0 | 503 | |
michael@0 | 504 | Imm16::Imm16(uint32_t imm) |
michael@0 | 505 | : lower(imm & 0xfff), pad(0), |
michael@0 | 506 | upper((imm>>12) & 0xf), |
michael@0 | 507 | invalid(0) |
michael@0 | 508 | { |
michael@0 | 509 | JS_ASSERT(decode() == imm); |
michael@0 | 510 | } |
michael@0 | 511 | |
michael@0 | 512 | Imm16::Imm16() |
michael@0 | 513 | : invalid(0xfff) |
michael@0 | 514 | { } |
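// A small worked example of the split above, assuming the usual movw/movt
// field layout (imm12 in bits 11..0, imm4 in bits 19..16):
//
//   Imm16(0xBEEF) -> lower = 0xEEF, upper = 0xB, decode() == 0xBEEF
//
// The Instruction-based constructor reads the same two fields back out of an
// existing movw/movt encoding.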
michael@0 | 515 | |
michael@0 | 516 | void |
michael@0 | 517 | jit::PatchJump(CodeLocationJump &jump_, CodeLocationLabel label) |
michael@0 | 518 | { |
michael@0 | 519 | // We need to determine if this jump can fit into the standard 24+2 bit address |
michael@0 | 520 | // or if we need a larger branch (or just need to use our pool entry). |
michael@0 | 521 | Instruction *jump = (Instruction*)jump_.raw(); |
michael@0 | 522 | Assembler::Condition c; |
michael@0 | 523 | jump->extractCond(&c); |
michael@0 | 524 | JS_ASSERT(jump->is<InstBranchImm>() || jump->is<InstLDR>()); |
michael@0 | 525 | |
michael@0 | 526 | int jumpOffset = label.raw() - jump_.raw(); |
michael@0 | 527 | if (BOffImm::isInRange(jumpOffset)) { |
michael@0 | 528 | // This instruction started off as a branch, and will remain one |
michael@0 | 529 | Assembler::retargetNearBranch(jump, jumpOffset, c); |
michael@0 | 530 | } else { |
michael@0 | 531 | // This instruction started off as a branch, but now needs to be demoted to an ldr. |
michael@0 | 532 | uint8_t **slot = reinterpret_cast<uint8_t**>(jump_.jumpTableEntry()); |
michael@0 | 533 | Assembler::retargetFarBranch(jump, slot, label.raw(), c); |
michael@0 | 534 | } |
michael@0 | 535 | } |
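// The "24+2 bit" range mentioned above is the b/bl immediate: a signed 24-bit
// word offset shifted left by two, so a near branch should reach roughly
// +/-32 MB from the jump. Anything further falls back to the jump-table slot
// and an ldr-based far branch.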
michael@0 | 536 | |
michael@0 | 537 | void |
michael@0 | 538 | Assembler::finish() |
michael@0 | 539 | { |
michael@0 | 540 | flush(); |
michael@0 | 541 | JS_ASSERT(!isFinished); |
michael@0 | 542 | isFinished = true; |
michael@0 | 543 | |
michael@0 | 544 | for (unsigned int i = 0; i < tmpDataRelocations_.length(); i++) { |
michael@0 | 545 | int offset = tmpDataRelocations_[i].getOffset(); |
michael@0 | 546 | int real_offset = offset + m_buffer.poolSizeBefore(offset); |
michael@0 | 547 | dataRelocations_.writeUnsigned(real_offset); |
michael@0 | 548 | } |
michael@0 | 549 | |
michael@0 | 550 | for (unsigned int i = 0; i < tmpJumpRelocations_.length(); i++) { |
michael@0 | 551 | int offset = tmpJumpRelocations_[i].getOffset(); |
michael@0 | 552 | int real_offset = offset + m_buffer.poolSizeBefore(offset); |
michael@0 | 553 | jumpRelocations_.writeUnsigned(real_offset); |
michael@0 | 554 | } |
michael@0 | 555 | |
michael@0 | 556 | for (unsigned int i = 0; i < tmpPreBarriers_.length(); i++) { |
michael@0 | 557 | int offset = tmpPreBarriers_[i].getOffset(); |
michael@0 | 558 | int real_offset = offset + m_buffer.poolSizeBefore(offset); |
michael@0 | 559 | preBarriers_.writeUnsigned(real_offset); |
michael@0 | 560 | } |
michael@0 | 561 | } |
michael@0 | 562 | |
michael@0 | 563 | void |
michael@0 | 564 | Assembler::executableCopy(uint8_t *buffer) |
michael@0 | 565 | { |
michael@0 | 566 | JS_ASSERT(isFinished); |
michael@0 | 567 | m_buffer.executableCopy(buffer); |
michael@0 | 568 | AutoFlushICache::setRange(uintptr_t(buffer), m_buffer.size()); |
michael@0 | 569 | } |
michael@0 | 570 | |
michael@0 | 571 | void |
michael@0 | 572 | Assembler::resetCounter() |
michael@0 | 573 | { |
michael@0 | 574 | m_buffer.resetCounter(); |
michael@0 | 575 | } |
michael@0 | 576 | |
michael@0 | 577 | uint32_t |
michael@0 | 578 | Assembler::actualOffset(uint32_t off_) const |
michael@0 | 579 | { |
michael@0 | 580 | return off_ + m_buffer.poolSizeBefore(off_); |
michael@0 | 581 | } |
michael@0 | 582 | |
michael@0 | 583 | uint32_t |
michael@0 | 584 | Assembler::actualIndex(uint32_t idx_) const |
michael@0 | 585 | { |
michael@0 | 586 | ARMBuffer::PoolEntry pe(idx_); |
michael@0 | 587 | return m_buffer.poolEntryOffset(pe); |
michael@0 | 588 | } |
michael@0 | 589 | |
michael@0 | 590 | uint8_t * |
michael@0 | 591 | Assembler::PatchableJumpAddress(JitCode *code, uint32_t pe_) |
michael@0 | 592 | { |
michael@0 | 593 | return code->raw() + pe_; |
michael@0 | 594 | } |
michael@0 | 595 | |
michael@0 | 596 | BufferOffset |
michael@0 | 597 | Assembler::actualOffset(BufferOffset off_) const |
michael@0 | 598 | { |
michael@0 | 599 | return BufferOffset(off_.getOffset() + m_buffer.poolSizeBefore(off_.getOffset())); |
michael@0 | 600 | } |
michael@0 | 601 | |
michael@0 | 602 | class RelocationIterator |
michael@0 | 603 | { |
michael@0 | 604 | CompactBufferReader reader_; |
michael@0 | 605 | // offset in bytes |
michael@0 | 606 | uint32_t offset_; |
michael@0 | 607 | |
michael@0 | 608 | public: |
michael@0 | 609 | RelocationIterator(CompactBufferReader &reader) |
michael@0 | 610 | : reader_(reader) |
michael@0 | 611 | { } |
michael@0 | 612 | |
michael@0 | 613 | bool read() { |
michael@0 | 614 | if (!reader_.more()) |
michael@0 | 615 | return false; |
michael@0 | 616 | offset_ = reader_.readUnsigned(); |
michael@0 | 617 | return true; |
michael@0 | 618 | } |
michael@0 | 619 | |
michael@0 | 620 | uint32_t offset() const { |
michael@0 | 621 | return offset_; |
michael@0 | 622 | } |
michael@0 | 623 | }; |
michael@0 | 624 | |
michael@0 | 625 | template<class Iter> |
michael@0 | 626 | const uint32_t * |
michael@0 | 627 | Assembler::getCF32Target(Iter *iter) |
michael@0 | 628 | { |
michael@0 | 629 | Instruction *inst1 = iter->cur(); |
michael@0 | 630 | Instruction *inst2 = iter->next(); |
michael@0 | 631 | Instruction *inst3 = iter->next(); |
michael@0 | 632 | Instruction *inst4 = iter->next(); |
michael@0 | 633 | |
michael@0 | 634 | if (inst1->is<InstBranchImm>()) { |
michael@0 | 635 | // see if we have a simple case, b #offset |
michael@0 | 636 | BOffImm imm; |
michael@0 | 637 | InstBranchImm *jumpB = inst1->as<InstBranchImm>(); |
michael@0 | 638 | jumpB->extractImm(&imm); |
michael@0 | 639 | return imm.getDest(inst1)->raw(); |
michael@0 | 640 | } |
michael@0 | 641 | |
michael@0 | 642 | if (inst1->is<InstMovW>() && inst2->is<InstMovT>() && |
michael@0 | 643 | (inst3->is<InstNOP>() || inst3->is<InstBranchReg>() || inst4->is<InstBranchReg>())) |
michael@0 | 644 | { |
michael@0 | 645 | // see if we have the complex case, |
michael@0 | 646 | // movw r_temp, #imm1 |
michael@0 | 647 | // movt r_temp, #imm2 |
michael@0 | 648 | // bx r_temp |
michael@0 | 649 | // OR |
michael@0 | 650 | // movw r_temp, #imm1 |
michael@0 | 651 | // movt r_temp, #imm2 |
michael@0 | 652 | // str pc, [sp] |
michael@0 | 653 | // bx r_temp |
michael@0 | 654 | |
michael@0 | 655 | Imm16 targ_bot; |
michael@0 | 656 | Imm16 targ_top; |
michael@0 | 657 | Register temp; |
michael@0 | 658 | |
michael@0 | 659 | // Extract both the temp register and the bottom immediate. |
michael@0 | 660 | InstMovW *bottom = inst1->as<InstMovW>(); |
michael@0 | 661 | bottom->extractImm(&targ_bot); |
michael@0 | 662 | bottom->extractDest(&temp); |
michael@0 | 663 | |
michael@0 | 664 | // Extract the top part of the immediate. |
michael@0 | 665 | InstMovT *top = inst2->as<InstMovT>(); |
michael@0 | 666 | top->extractImm(&targ_top); |
michael@0 | 667 | |
michael@0 | 668 | // Make sure they are being loaded into the same register. |
michael@0 | 669 | JS_ASSERT(top->checkDest(temp)); |
michael@0 | 670 | |
michael@0 | 671 | // Make sure we're branching to the same register. |
michael@0 | 672 | #ifdef DEBUG |
michael@0 | 673 | // A toggled call sometimes has a NOP instead of a branch for the third instruction. |
michael@0 | 674 | // No way to assert that it's valid in that situation. |
michael@0 | 675 | if (!inst3->is<InstNOP>()) { |
michael@0 | 676 | InstBranchReg *realBranch = inst3->is<InstBranchReg>() ? inst3->as<InstBranchReg>() |
michael@0 | 677 | : inst4->as<InstBranchReg>(); |
michael@0 | 678 | JS_ASSERT(realBranch->checkDest(temp)); |
michael@0 | 679 | } |
michael@0 | 680 | #endif |
michael@0 | 681 | |
michael@0 | 682 | uint32_t *dest = (uint32_t*) (targ_bot.decode() | (targ_top.decode() << 16)); |
michael@0 | 683 | return dest; |
michael@0 | 684 | } |
michael@0 | 685 | |
michael@0 | 686 | if (inst1->is<InstLDR>()) { |
michael@0 | 687 | InstLDR *load = inst1->as<InstLDR>(); |
michael@0 | 688 | uint32_t inst = load->encode(); |
michael@0 | 689 | // get the address of the instruction as a raw pointer |
michael@0 | 690 | char *dataInst = reinterpret_cast<char*>(load); |
michael@0 | 691 | IsUp_ iu = IsUp_(inst & IsUp); |
michael@0 | 692 | int32_t offset = inst & 0xfff; |
michael@0 | 693 | if (iu != IsUp) { |
michael@0 | 694 | offset = - offset; |
michael@0 | 695 | } |
michael@0 | 696 | uint32_t **ptr = (uint32_t **)&dataInst[offset + 8]; |
michael@0 | 697 | return *ptr; |
michael@0 | 698 | |
michael@0 | 699 | } |
michael@0 | 700 | |
michael@0 | 701 | MOZ_ASSUME_UNREACHABLE("unsupported branch relocation"); |
michael@0 | 702 | } |
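// A rough example of the movw/movt case handled above (register and values
// chosen purely for illustration): the pair
//
//   movw r12, #0x5678
//   movt r12, #0x1234
//
// gives targ_bot = 0x5678 and targ_top = 0x1234, so the returned target is
// (const uint32_t *)0x12345678.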
michael@0 | 703 | |
michael@0 | 704 | uintptr_t |
michael@0 | 705 | Assembler::getPointer(uint8_t *instPtr) |
michael@0 | 706 | { |
michael@0 | 707 | InstructionIterator iter((Instruction*)instPtr); |
michael@0 | 708 | uintptr_t ret = (uintptr_t)getPtr32Target(&iter, nullptr, nullptr); |
michael@0 | 709 | return ret; |
michael@0 | 710 | } |
michael@0 | 711 | |
michael@0 | 712 | template<class Iter> |
michael@0 | 713 | const uint32_t * |
michael@0 | 714 | Assembler::getPtr32Target(Iter *start, Register *dest, RelocStyle *style) |
michael@0 | 715 | { |
michael@0 | 716 | Instruction *load1 = start->cur(); |
michael@0 | 717 | Instruction *load2 = start->next(); |
michael@0 | 718 | |
michael@0 | 719 | if (load1->is<InstMovW>() && load2->is<InstMovT>()) { |
michael@0 | 720 | // see if we have the complex case, |
michael@0 | 721 | // movw r_temp, #imm1 |
michael@0 | 722 | // movt r_temp, #imm2 |
michael@0 | 723 | |
michael@0 | 724 | Imm16 targ_bot; |
michael@0 | 725 | Imm16 targ_top; |
michael@0 | 726 | Register temp; |
michael@0 | 727 | |
michael@0 | 728 | // Extract both the temp register and the bottom immediate. |
michael@0 | 729 | InstMovW *bottom = load1->as<InstMovW>(); |
michael@0 | 730 | bottom->extractImm(&targ_bot); |
michael@0 | 731 | bottom->extractDest(&temp); |
michael@0 | 732 | |
michael@0 | 733 | // Extract the top part of the immediate. |
michael@0 | 734 | InstMovT *top = load2->as<InstMovT>(); |
michael@0 | 735 | top->extractImm(&targ_top); |
michael@0 | 736 | |
michael@0 | 737 | // Make sure they are being loaded into the same register. |
michael@0 | 738 | JS_ASSERT(top->checkDest(temp)); |
michael@0 | 739 | |
michael@0 | 740 | if (dest) |
michael@0 | 741 | *dest = temp; |
michael@0 | 742 | if (style) |
michael@0 | 743 | *style = L_MOVWT; |
michael@0 | 744 | |
michael@0 | 745 | uint32_t *value = (uint32_t*) (targ_bot.decode() | (targ_top.decode() << 16)); |
michael@0 | 746 | return value; |
michael@0 | 747 | } |
michael@0 | 748 | if (load1->is<InstLDR>()) { |
michael@0 | 749 | InstLDR *load = load1->as<InstLDR>(); |
michael@0 | 750 | uint32_t inst = load->encode(); |
michael@0 | 751 | // get the address of the instruction as a raw pointer |
michael@0 | 752 | char *dataInst = reinterpret_cast<char*>(load); |
michael@0 | 753 | IsUp_ iu = IsUp_(inst & IsUp); |
michael@0 | 754 | int32_t offset = inst & 0xfff; |
michael@0 | 755 | if (iu == IsDown) |
michael@0 | 756 | offset = - offset; |
michael@0 | 757 | if (dest) |
michael@0 | 758 | *dest = toRD(*load); |
michael@0 | 759 | if (style) |
michael@0 | 760 | *style = L_LDR; |
michael@0 | 761 | uint32_t **ptr = (uint32_t **)&dataInst[offset + 8]; |
michael@0 | 762 | return *ptr; |
michael@0 | 763 | } |
michael@0 | 764 | MOZ_ASSUME_UNREACHABLE("unsupported relocation"); |
michael@0 | 765 | } |
michael@0 | 766 | |
michael@0 | 767 | static JitCode * |
michael@0 | 768 | CodeFromJump(InstructionIterator *jump) |
michael@0 | 769 | { |
michael@0 | 770 | uint8_t *target = (uint8_t *)Assembler::getCF32Target(jump); |
michael@0 | 771 | return JitCode::FromExecutable(target); |
michael@0 | 772 | } |
michael@0 | 773 | |
michael@0 | 774 | void |
michael@0 | 775 | Assembler::TraceJumpRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader) |
michael@0 | 776 | { |
michael@0 | 777 | RelocationIterator iter(reader); |
michael@0 | 778 | while (iter.read()) { |
michael@0 | 779 | InstructionIterator institer((Instruction *) (code->raw() + iter.offset())); |
michael@0 | 780 | JitCode *child = CodeFromJump(&institer); |
michael@0 | 781 | MarkJitCodeUnbarriered(trc, &child, "rel32"); |
michael@0 | 782 | } |
michael@0 | 783 | } |
michael@0 | 784 | |
michael@0 | 785 | static void |
michael@0 | 786 | TraceDataRelocations(JSTracer *trc, uint8_t *buffer, CompactBufferReader &reader) |
michael@0 | 787 | { |
michael@0 | 788 | while (reader.more()) { |
michael@0 | 789 | size_t offset = reader.readUnsigned(); |
michael@0 | 790 | InstructionIterator iter((Instruction*)(buffer+offset)); |
michael@0 | 791 | void *ptr = const_cast<uint32_t *>(js::jit::Assembler::getPtr32Target(&iter)); |
michael@0 | 792 | // No barrier needed since these are constants. |
michael@0 | 793 | gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr"); |
michael@0 | 794 | } |
michael@0 | 795 | |
michael@0 | 796 | } |
michael@0 | 797 | static void |
michael@0 | 798 | TraceDataRelocations(JSTracer *trc, ARMBuffer *buffer, |
michael@0 | 799 | js::Vector<BufferOffset, 0, SystemAllocPolicy> *locs) |
michael@0 | 800 | { |
michael@0 | 801 | for (unsigned int idx = 0; idx < locs->length(); idx++) { |
michael@0 | 802 | BufferOffset bo = (*locs)[idx]; |
michael@0 | 803 | ARMBuffer::AssemblerBufferInstIterator iter(bo, buffer); |
michael@0 | 804 | void *ptr = const_cast<uint32_t *>(jit::Assembler::getPtr32Target(&iter)); |
michael@0 | 805 | |
michael@0 | 806 | // No barrier needed since these are constants. |
michael@0 | 807 | gc::MarkGCThingUnbarriered(trc, reinterpret_cast<void **>(&ptr), "ion-masm-ptr"); |
michael@0 | 808 | } |
michael@0 | 809 | |
michael@0 | 810 | } |
michael@0 | 811 | void |
michael@0 | 812 | Assembler::TraceDataRelocations(JSTracer *trc, JitCode *code, CompactBufferReader &reader) |
michael@0 | 813 | { |
michael@0 | 814 | ::TraceDataRelocations(trc, code->raw(), reader); |
michael@0 | 815 | } |
michael@0 | 816 | |
michael@0 | 817 | void |
michael@0 | 818 | Assembler::copyJumpRelocationTable(uint8_t *dest) |
michael@0 | 819 | { |
michael@0 | 820 | if (jumpRelocations_.length()) |
michael@0 | 821 | memcpy(dest, jumpRelocations_.buffer(), jumpRelocations_.length()); |
michael@0 | 822 | } |
michael@0 | 823 | |
michael@0 | 824 | void |
michael@0 | 825 | Assembler::copyDataRelocationTable(uint8_t *dest) |
michael@0 | 826 | { |
michael@0 | 827 | if (dataRelocations_.length()) |
michael@0 | 828 | memcpy(dest, dataRelocations_.buffer(), dataRelocations_.length()); |
michael@0 | 829 | } |
michael@0 | 830 | |
michael@0 | 831 | void |
michael@0 | 832 | Assembler::copyPreBarrierTable(uint8_t *dest) |
michael@0 | 833 | { |
michael@0 | 834 | if (preBarriers_.length()) |
michael@0 | 835 | memcpy(dest, preBarriers_.buffer(), preBarriers_.length()); |
michael@0 | 836 | } |
michael@0 | 837 | |
michael@0 | 838 | void |
michael@0 | 839 | Assembler::trace(JSTracer *trc) |
michael@0 | 840 | { |
michael@0 | 841 | for (size_t i = 0; i < jumps_.length(); i++) { |
michael@0 | 842 | RelativePatch &rp = jumps_[i]; |
michael@0 | 843 | if (rp.kind == Relocation::JITCODE) { |
michael@0 | 844 | JitCode *code = JitCode::FromExecutable((uint8_t*)rp.target); |
michael@0 | 845 | MarkJitCodeUnbarriered(trc, &code, "masmrel32"); |
michael@0 | 846 | JS_ASSERT(code == JitCode::FromExecutable((uint8_t*)rp.target)); |
michael@0 | 847 | } |
michael@0 | 848 | } |
michael@0 | 849 | |
michael@0 | 850 | if (tmpDataRelocations_.length()) |
michael@0 | 851 | ::TraceDataRelocations(trc, &m_buffer, &tmpDataRelocations_); |
michael@0 | 852 | } |
michael@0 | 853 | |
michael@0 | 854 | void |
michael@0 | 855 | Assembler::processCodeLabels(uint8_t *rawCode) |
michael@0 | 856 | { |
michael@0 | 857 | for (size_t i = 0; i < codeLabels_.length(); i++) { |
michael@0 | 858 | CodeLabel label = codeLabels_[i]; |
michael@0 | 859 | Bind(rawCode, label.dest(), rawCode + actualOffset(label.src()->offset())); |
michael@0 | 860 | } |
michael@0 | 861 | } |
michael@0 | 862 | |
michael@0 | 863 | void |
michael@0 | 864 | Assembler::writeCodePointer(AbsoluteLabel *absoluteLabel) { |
michael@0 | 865 | JS_ASSERT(!absoluteLabel->bound()); |
michael@0 | 866 | BufferOffset off = writeInst(LabelBase::INVALID_OFFSET); |
michael@0 | 867 | |
michael@0 | 868 | // x86/x64 makes general use of AbsoluteLabel and weaves a linked list of |
michael@0 | 869 | // uses of an AbsoluteLabel through the assembly. ARM only uses labels |
michael@0 | 870 | // for the case statements of switch jump tables. Thus, for simplicity, we |
michael@0 | 871 | // simply treat the AbsoluteLabel as a label and bind it to the offset of |
michael@0 | 872 | // the jump table entry that needs to be patched. |
michael@0 | 873 | LabelBase *label = absoluteLabel; |
michael@0 | 874 | label->bind(off.getOffset()); |
michael@0 | 875 | } |
michael@0 | 876 | |
michael@0 | 877 | void |
michael@0 | 878 | Assembler::Bind(uint8_t *rawCode, AbsoluteLabel *label, const void *address) |
michael@0 | 879 | { |
michael@0 | 880 | // See writeCodePointer comment. |
michael@0 | 881 | uint32_t off = actualOffset(label->offset()); |
michael@0 | 882 | *reinterpret_cast<const void **>(rawCode + off) = address; |
michael@0 | 883 | } |
michael@0 | 884 | |
michael@0 | 885 | Assembler::Condition |
michael@0 | 886 | Assembler::InvertCondition(Condition cond) |
michael@0 | 887 | { |
michael@0 | 888 | const uint32_t ConditionInversionBit = 0x10000000; |
michael@0 | 889 | return Condition(ConditionInversionBit ^ cond); |
michael@0 | 890 | } |
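// ARM condition codes sit in bits 31..28 and are laid out in complementary
// pairs, so toggling bit 28 inverts the test. For example, assuming the usual
// encodings:
//
//   Equal (0x00000000) ^ 0x10000000 == NotEqual (0x10000000)
//   GreaterThanOrEqual ^ 0x10000000 == LessThan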
michael@0 | 891 | |
michael@0 | 892 | Imm8::TwoImm8mData |
michael@0 | 893 | Imm8::encodeTwoImms(uint32_t imm) |
michael@0 | 894 | { |
michael@0 | 895 | // In the ideal case, we are looking for a number that (in binary) looks like: |
michael@0 | 896 | // 0b((00)*)n_1((00)*)n_2((00)*) |
michael@0 | 897 | // left n1 mid n2 |
michael@0 | 898 | // where both n_1 and n_2 fit into 8 bits. |
michael@0 | 899 | // since this is being done with rotates, we also need to handle the case |
michael@0 | 900 | // that one of these numbers is in fact split between the left and right |
michael@0 | 901 | // sides, in which case the constant will look like: |
michael@0 | 902 | // 0bn_1a((00)*)n_2((00)*)n_1b |
michael@0 | 903 | // n1a mid n2 rgh n1b |
michael@0 | 904 | // also remember, values are rotated by multiples of two, and left, |
michael@0 | 905 | // mid or right can have length zero |
michael@0 | 906 | uint32_t imm1, imm2; |
michael@0 | 907 | int left = CountLeadingZeroes32(imm) & 0x1E; |
michael@0 | 908 | uint32_t no_n1 = imm & ~(0xff << (24 - left)); |
michael@0 | 909 | |
michael@0 | 910 | // not technically needed: this case only happens if we can encode |
michael@0 | 911 | // as a single imm8m. There is a perfectly reasonable encoding in this |
michael@0 | 912 | // case, but we shouldn't encourage people to do things like this. |
michael@0 | 913 | if (no_n1 == 0) |
michael@0 | 914 | return TwoImm8mData(); |
michael@0 | 915 | |
michael@0 | 916 | int mid = CountLeadingZeroes32(no_n1) & 0x1E; |
michael@0 | 917 | uint32_t no_n2 = no_n1 & ~((0xff << ((24 - mid) & 0x1f)) | 0xff >> ((8 + mid) & 0x1f)); |
michael@0 | 918 | |
michael@0 | 919 | if (no_n2 == 0) { |
michael@0 | 920 | // we hit the easy case, no wraparound. |
michael@0 | 921 | // note: a single constant *may* look like this. |
michael@0 | 922 | int imm1shift = left + 8; |
michael@0 | 923 | int imm2shift = mid + 8; |
michael@0 | 924 | imm1 = (imm >> (32 - imm1shift)) & 0xff; |
michael@0 | 925 | if (imm2shift >= 32) { |
michael@0 | 926 | imm2shift = 0; |
michael@0 | 927 | // this assert does not always hold |
michael@0 | 928 | //assert((imm & 0xff) == no_n1); |
michael@0 | 929 | // in fact, this would lead to some incredibly subtle bugs. |
michael@0 | 930 | imm2 = no_n1; |
michael@0 | 931 | } else { |
michael@0 | 932 | imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff; |
michael@0 | 933 | JS_ASSERT( ((no_n1 >> (32 - imm2shift)) | (no_n1 << imm2shift)) == |
michael@0 | 934 | imm2); |
michael@0 | 935 | } |
michael@0 | 936 | JS_ASSERT((imm1shift & 0x1) == 0); |
michael@0 | 937 | JS_ASSERT((imm2shift & 0x1) == 0); |
michael@0 | 938 | return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1), |
michael@0 | 939 | datastore::Imm8mData(imm2, imm2shift >> 1)); |
michael@0 | 940 | } |
michael@0 | 941 | |
michael@0 | 942 | // either it wraps, or it does not fit. |
michael@0 | 943 | // if we initially chopped off more than 8 bits, then it won't fit. |
michael@0 | 944 | if (left >= 8) |
michael@0 | 945 | return TwoImm8mData(); |
michael@0 | 946 | |
michael@0 | 947 | int right = 32 - (CountLeadingZeroes32(no_n2) & 30); |
michael@0 | 948 | // all remaining set bits *must* fit into the lower 8 bits |
michael@0 | 949 | // the right == 8 case should be handled by the previous case. |
michael@0 | 950 | if (right > 8) |
michael@0 | 951 | return TwoImm8mData(); |
michael@0 | 952 | |
michael@0 | 953 | // make sure the initial bits that we removed for no_n1 |
michael@0 | 954 | // fit into the 8-(32-right) leftmost bits |
michael@0 | 955 | if (((imm & (0xff << (24 - left))) << (8-right)) != 0) { |
michael@0 | 956 | // BUT we may have removed more bits than we needed to for no_n1 |
michael@0 | 957 | // 0x04104001 e.g. we can encode 0x104 with a single op, then |
michael@0 | 958 | // 0x04000001 with a second, but we try to encode 0x04100000 |
michael@0 | 959 | // and find that we need a second op for 0x4000, and 0x1 cannot |
michael@0 | 960 | // be included in the encoding of 0x04100000 |
michael@0 | 961 | no_n1 = imm & ~((0xff >> (8-right)) | (0xff << (24 + right))); |
michael@0 | 962 | mid = CountLeadingZeroes32(no_n1) & 30; |
michael@0 | 963 | no_n2 = |
michael@0 | 964 | no_n1 & ~((0xff << ((24 - mid)&31)) | 0xff >> ((8 + mid)&31)); |
michael@0 | 965 | if (no_n2 != 0) |
michael@0 | 966 | return TwoImm8mData(); |
michael@0 | 967 | } |
michael@0 | 968 | |
michael@0 | 969 | // Now assemble all of this information into two coherent constants. |
michael@0 | 970 | // it is a rotate right from the lower 8 bits. |
michael@0 | 971 | int imm1shift = 8 - right; |
michael@0 | 972 | imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift))); |
michael@0 | 973 | JS_ASSERT ((imm1shift&~0x1e) == 0); |
michael@0 | 974 | // left + 8 + mid is the position of the leftmost bit of n_2. |
michael@0 | 975 | // we needed to rotate 0x000000ab right by 8 in order to get |
michael@0 | 976 | // 0xab000000, then shift again by the leftmost bit in order to |
michael@0 | 977 | // get the constant that we care about. |
michael@0 | 978 | int imm2shift = mid + 8; |
michael@0 | 979 | imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff; |
michael@0 | 980 | JS_ASSERT((imm1shift & 0x1) == 0); |
michael@0 | 981 | JS_ASSERT((imm2shift & 0x1) == 0); |
michael@0 | 982 | return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1), |
michael@0 | 983 | datastore::Imm8mData(imm2, imm2shift >> 1)); |
michael@0 | 984 | } |
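// A rough worked example of the split above: 0x00ff00ff is not a valid single
// imm8m, but it decomposes into two rotated 8-bit chunks,
//
//   imm1 = 0xff rotated right by 16 -> 0x00ff0000
//   imm2 = 0xff rotated right by 0  -> 0x000000ff
//
// so, for instance, "eor r0, r1, #0x00ff00ff" could be emitted as
//   eor r0, r1, #0x00ff0000
//   eor r0, r0, #0x000000ff
// (eor is one of the ops can_dbl/condsAreSafe below accept for this.)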
michael@0 | 985 | |
michael@0 | 986 | ALUOp |
michael@0 | 987 | jit::ALUNeg(ALUOp op, Register dest, Imm32 *imm, Register *negDest) |
michael@0 | 988 | { |
michael@0 | 989 | // find an alternate ALUOp to get the job done, and use a different imm. |
michael@0 | 990 | *negDest = dest; |
michael@0 | 991 | switch (op) { |
michael@0 | 992 | case op_mov: |
michael@0 | 993 | *imm = Imm32(~imm->value); |
michael@0 | 994 | return op_mvn; |
michael@0 | 995 | case op_mvn: |
michael@0 | 996 | *imm = Imm32(~imm->value); |
michael@0 | 997 | return op_mov; |
michael@0 | 998 | case op_and: |
michael@0 | 999 | *imm = Imm32(~imm->value); |
michael@0 | 1000 | return op_bic; |
michael@0 | 1001 | case op_bic: |
michael@0 | 1002 | *imm = Imm32(~imm->value); |
michael@0 | 1003 | return op_and; |
michael@0 | 1004 | case op_add: |
michael@0 | 1005 | *imm = Imm32(-imm->value); |
michael@0 | 1006 | return op_sub; |
michael@0 | 1007 | case op_sub: |
michael@0 | 1008 | *imm = Imm32(-imm->value); |
michael@0 | 1009 | return op_add; |
michael@0 | 1010 | case op_cmp: |
michael@0 | 1011 | *imm = Imm32(-imm->value); |
michael@0 | 1012 | return op_cmn; |
michael@0 | 1013 | case op_cmn: |
michael@0 | 1014 | *imm = Imm32(-imm->value); |
michael@0 | 1015 | return op_cmp; |
michael@0 | 1016 | case op_tst: |
michael@0 | 1017 | JS_ASSERT(dest == InvalidReg); |
michael@0 | 1018 | *imm = Imm32(~imm->value); |
michael@0 | 1019 | *negDest = ScratchRegister; |
michael@0 | 1020 | return op_bic; |
michael@0 | 1021 | // orr has orn on thumb2 only. |
michael@0 | 1022 | default: |
michael@0 | 1023 | return op_invalid; |
michael@0 | 1024 | } |
michael@0 | 1025 | } |
michael@0 | 1026 | |
michael@0 | 1027 | bool |
michael@0 | 1028 | jit::can_dbl(ALUOp op) |
michael@0 | 1029 | { |
michael@0 | 1030 | // some instructions can't be processed as two separate instructions |
michael@0 | 1031 | // such as and, and possibly add (when we're setting ccodes). |
michael@0 | 1032 | // there is also some hilarity with *reading* condition codes. |
michael@0 | 1033 | // for example, adc dest, src1, 0xfff; (add with carry) can be split up |
michael@0 | 1034 | // into adc dest, src1, 0xf00; add dest, dest, 0xff, since "reading" the |
michael@0 | 1035 | // condition code increments the result by one conditionally, that only needs |
michael@0 | 1036 | // to be done on one of the two instructions. |
michael@0 | 1037 | switch (op) { |
michael@0 | 1038 | case op_bic: |
michael@0 | 1039 | case op_add: |
michael@0 | 1040 | case op_sub: |
michael@0 | 1041 | case op_eor: |
michael@0 | 1042 | case op_orr: |
michael@0 | 1043 | return true; |
michael@0 | 1044 | default: |
michael@0 | 1045 | return false; |
michael@0 | 1046 | } |
michael@0 | 1047 | } |
michael@0 | 1048 | |
michael@0 | 1049 | bool |
michael@0 | 1050 | jit::condsAreSafe(ALUOp op) { |
michael@0 | 1051 | // Even when we are setting condition codes, sometimes we can |
michael@0 | 1052 | // get away with splitting an operation into two. |
michael@0 | 1053 | // for example, if our immediate is 0x00ff00ff, and the operation is eors |
michael@0 | 1054 | // we can split this in half, since x ^ 0x00ff0000 ^ 0x000000ff should |
michael@0 | 1055 | // set all of its condition codes exactly the same as x ^ 0x00ff00ff. |
michael@0 | 1056 | // However, if the operation were adds, |
michael@0 | 1057 | // we cannot split this in half. If the source on the add is |
michael@0 | 1058 | // 0xfff00ff0, the result should be 0xef10ef, but do we set the overflow bit |
michael@0 | 1059 | // or not? Depending on which half is performed first (0x00ff0000 |
michael@0 | 1060 | // or 0x000000ff) the V bit will be set differently, and *not* updating |
michael@0 | 1061 | // the V bit would be wrong. Theoretically, the following should work |
michael@0 | 1062 | // adds r0, r1, 0x00ff0000; |
michael@0 | 1063 | // addsvs r0, r1, 0x000000ff; |
michael@0 | 1064 | // addvc r0, r1, 0x000000ff; |
michael@0 | 1065 | // but this is 3 instructions, and at that point, we might as well use |
michael@0 | 1066 | // something else. |
michael@0 | 1067 | switch(op) { |
michael@0 | 1068 | case op_bic: |
michael@0 | 1069 | case op_orr: |
michael@0 | 1070 | case op_eor: |
michael@0 | 1071 | return true; |
michael@0 | 1072 | default: |
michael@0 | 1073 | return false; |
michael@0 | 1074 | } |
michael@0 | 1075 | } |
michael@0 | 1076 | |
michael@0 | 1077 | ALUOp |
michael@0 | 1078 | jit::getDestVariant(ALUOp op) |
michael@0 | 1079 | { |
michael@0 | 1080 | // all of the compare operations are dest-less variants of a standard |
michael@0 | 1081 | // operation. Given the dest-less variant, return the dest-ful variant. |
michael@0 | 1082 | switch (op) { |
michael@0 | 1083 | case op_cmp: |
michael@0 | 1084 | return op_sub; |
michael@0 | 1085 | case op_cmn: |
michael@0 | 1086 | return op_add; |
michael@0 | 1087 | case op_tst: |
michael@0 | 1088 | return op_and; |
michael@0 | 1089 | case op_teq: |
michael@0 | 1090 | return op_eor; |
michael@0 | 1091 | default: |
michael@0 | 1092 | return op; |
michael@0 | 1093 | } |
michael@0 | 1094 | } |
michael@0 | 1095 | |
michael@0 | 1096 | O2RegImmShift |
michael@0 | 1097 | jit::O2Reg(Register r) { |
michael@0 | 1098 | return O2RegImmShift(r, LSL, 0); |
michael@0 | 1099 | } |
michael@0 | 1100 | |
michael@0 | 1101 | O2RegImmShift |
michael@0 | 1102 | jit::lsl(Register r, int amt) |
michael@0 | 1103 | { |
michael@0 | 1104 | JS_ASSERT(0 <= amt && amt <= 31); |
michael@0 | 1105 | return O2RegImmShift(r, LSL, amt); |
michael@0 | 1106 | } |
michael@0 | 1107 | |
michael@0 | 1108 | O2RegImmShift |
michael@0 | 1109 | jit::lsr(Register r, int amt) |
michael@0 | 1110 | { |
michael@0 | 1111 | JS_ASSERT(1 <= amt && amt <= 32); |
michael@0 | 1112 | return O2RegImmShift(r, LSR, amt); |
michael@0 | 1113 | } |
michael@0 | 1114 | |
michael@0 | 1115 | O2RegImmShift |
michael@0 | 1116 | jit::ror(Register r, int amt) |
michael@0 | 1117 | { |
michael@0 | 1118 | JS_ASSERT(1 <= amt && amt <= 31); |
michael@0 | 1119 | return O2RegImmShift(r, ROR, amt); |
michael@0 | 1120 | } |
michael@0 | 1121 | O2RegImmShift |
michael@0 | 1122 | jit::rol(Register r, int amt) |
michael@0 | 1123 | { |
michael@0 | 1124 | JS_ASSERT(1 <= amt && amt <= 31); |
michael@0 | 1125 | return O2RegImmShift(r, ROR, 32 - amt); |
michael@0 | 1126 | } |
michael@0 | 1127 | |
michael@0 | 1128 | O2RegImmShift |
michael@0 | 1129 | jit::asr (Register r, int amt) |
michael@0 | 1130 | { |
michael@0 | 1131 | JS_ASSERT(1 <= amt && amt <= 32); |
michael@0 | 1132 | return O2RegImmShift(r, ASR, amt); |
michael@0 | 1133 | } |
michael@0 | 1134 | |
michael@0 | 1135 | |
michael@0 | 1136 | O2RegRegShift |
michael@0 | 1137 | jit::lsl(Register r, Register amt) |
michael@0 | 1138 | { |
michael@0 | 1139 | return O2RegRegShift(r, LSL, amt); |
michael@0 | 1140 | } |
michael@0 | 1141 | |
michael@0 | 1142 | O2RegRegShift |
michael@0 | 1143 | jit::lsr(Register r, Register amt) |
michael@0 | 1144 | { |
michael@0 | 1145 | return O2RegRegShift(r, LSR, amt); |
michael@0 | 1146 | } |
michael@0 | 1147 | |
michael@0 | 1148 | O2RegRegShift |
michael@0 | 1149 | jit::ror(Register r, Register amt) |
michael@0 | 1150 | { |
michael@0 | 1151 | return O2RegRegShift(r, ROR, amt); |
michael@0 | 1152 | } |
michael@0 | 1153 | |
michael@0 | 1154 | O2RegRegShift |
michael@0 | 1155 | jit::asr (Register r, Register amt) |
michael@0 | 1156 | { |
michael@0 | 1157 | return O2RegRegShift(r, ASR, amt); |
michael@0 | 1158 | } |
michael@0 | 1159 | |
michael@0 | 1160 | static js::jit::DoubleEncoder doubleEncoder; |
michael@0 | 1161 | |
michael@0 | 1162 | /* static */ const js::jit::VFPImm js::jit::VFPImm::one(0x3FF00000); |
michael@0 | 1163 | |
michael@0 | 1164 | js::jit::VFPImm::VFPImm(uint32_t top) |
michael@0 | 1165 | { |
michael@0 | 1166 | data = -1; |
michael@0 | 1167 | datastore::Imm8VFPImmData tmp; |
michael@0 | 1168 | if (doubleEncoder.lookup(top, &tmp)) |
michael@0 | 1169 | data = tmp.encode(); |
michael@0 | 1170 | } |
michael@0 | 1171 | |
michael@0 | 1172 | BOffImm::BOffImm(Instruction &inst) |
michael@0 | 1173 | : data(inst.encode() & 0x00ffffff) |
michael@0 | 1174 | { |
michael@0 | 1175 | } |
michael@0 | 1176 | |
michael@0 | 1177 | Instruction * |
michael@0 | 1178 | BOffImm::getDest(Instruction *src) |
michael@0 | 1179 | { |
michael@0 | 1180 | // TODO: It is probably worthwhile to verify that src is actually a branch |
michael@0 | 1181 | // NOTE: This does not explicitly shift the offset of the destination left by 2, |
michael@0 | 1182 | // since it is indexing into an array of instruction sized objects. |
michael@0 | 1183 | return &src[(((int32_t)data<<8)>>8) + 2]; |
michael@0 | 1184 | } |
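// The "+ 2" accounts for the ARM pipeline: PC reads as the branch address plus
// eight bytes, i.e. two Instructions. So a branch whose 24-bit offset field is
// zero should target &src[2], eight bytes past the branch itself.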
michael@0 | 1185 | |
michael@0 | 1186 | // VFPRegister implementation |
michael@0 | 1187 | VFPRegister |
michael@0 | 1188 | VFPRegister::doubleOverlay() const |
michael@0 | 1189 | { |
michael@0 | 1190 | JS_ASSERT(!_isInvalid); |
michael@0 | 1191 | if (kind != Double) { |
michael@0 | 1192 | JS_ASSERT(_code % 2 == 0); |
michael@0 | 1193 | return VFPRegister(_code >> 1, Double); |
michael@0 | 1194 | } |
michael@0 | 1195 | return *this; |
michael@0 | 1196 | } |
michael@0 | 1197 | VFPRegister |
michael@0 | 1198 | VFPRegister::singleOverlay() const |
michael@0 | 1199 | { |
michael@0 | 1200 | JS_ASSERT(!_isInvalid); |
michael@0 | 1201 | if (kind == Double) { |
michael@0 | 1202 | // There are no corresponding float registers for d16-d31 |
michael@0 | 1203 | JS_ASSERT(_code < 16); |
michael@0 | 1204 | return VFPRegister(_code << 1, Single); |
michael@0 | 1205 | } |
michael@0 | 1206 | |
michael@0 | 1207 | JS_ASSERT(_code % 2 == 0); |
michael@0 | 1208 | return VFPRegister(_code, Single); |
michael@0 | 1209 | } |
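// On ARM the single and double register files alias: s(2n) and s(2n+1) overlay
// d(n) for n < 16. With the overlays above, for example (a sketch, assuming
// the VFPRegister(code, kind) constructor used elsewhere in this file):
//
//   VFPRegister(6, Single).doubleOverlay() -> d3
//   VFPRegister(3, Double).singleOverlay() -> s6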
michael@0 | 1210 | |
michael@0 | 1211 | VFPRegister |
michael@0 | 1212 | VFPRegister::sintOverlay() const |
michael@0 | 1213 | { |
michael@0 | 1214 | JS_ASSERT(!_isInvalid); |
michael@0 | 1215 | if (kind == Double) { |
michael@0 | 1216 | // There are no corresponding float registers for d16-d31 |
michael@0 | 1217 | JS_ASSERT(_code < 16); |
michael@0 | 1218 | return VFPRegister(_code << 1, Int); |
michael@0 | 1219 | } |
michael@0 | 1220 | |
michael@0 | 1221 | JS_ASSERT(_code % 2 == 0); |
michael@0 | 1222 | return VFPRegister(_code, Int); |
michael@0 | 1223 | } |
michael@0 | 1224 | VFPRegister |
michael@0 | 1225 | VFPRegister::uintOverlay() const |
michael@0 | 1226 | { |
michael@0 | 1227 | JS_ASSERT(!_isInvalid); |
michael@0 | 1228 | if (kind == Double) { |
michael@0 | 1229 | // There are no corresponding float registers for d16-d31 |
michael@0 | 1230 | JS_ASSERT(_code < 16); |
michael@0 | 1231 | return VFPRegister(_code << 1, UInt); |
michael@0 | 1232 | } |
michael@0 | 1233 | |
michael@0 | 1234 | JS_ASSERT(_code % 2 == 0); |
michael@0 | 1235 | return VFPRegister(_code, UInt); |
michael@0 | 1236 | } |
michael@0 | 1237 | |
michael@0 | 1238 | bool |
michael@0 | 1239 | VFPRegister::isInvalid() |
michael@0 | 1240 | { |
michael@0 | 1241 | return _isInvalid; |
michael@0 | 1242 | } |
michael@0 | 1243 | |
michael@0 | 1244 | bool |
michael@0 | 1245 | VFPRegister::isMissing() |
michael@0 | 1246 | { |
michael@0 | 1247 | JS_ASSERT(!_isInvalid); |
michael@0 | 1248 | return _isMissing; |
michael@0 | 1249 | } |
michael@0 | 1250 | |
michael@0 | 1251 | |
michael@0 | 1252 | bool |
michael@0 | 1253 | Assembler::oom() const |
michael@0 | 1254 | { |
michael@0 | 1255 | return m_buffer.oom() || |
michael@0 | 1256 | !enoughMemory_ || |
michael@0 | 1257 | jumpRelocations_.oom() || |
michael@0 | 1258 | dataRelocations_.oom() || |
michael@0 | 1259 | preBarriers_.oom(); |
michael@0 | 1260 | } |
michael@0 | 1261 | |
michael@0 | 1262 | bool |
michael@0 | 1263 | Assembler::addCodeLabel(CodeLabel label) |
michael@0 | 1264 | { |
michael@0 | 1265 | return codeLabels_.append(label); |
michael@0 | 1266 | } |
michael@0 | 1267 | |
michael@0 | 1268 | // Size of the instruction stream, in bytes, including pools. This function expects |
michael@0 | 1269 | // that all pools that need to be placed have been placed. If they haven't, then we |
michael@0 | 1270 | // need to go and flush the pools :( |
michael@0 | 1271 | size_t |
michael@0 | 1272 | Assembler::size() const |
michael@0 | 1273 | { |
michael@0 | 1274 | return m_buffer.size(); |
michael@0 | 1275 | } |
michael@0 | 1276 | // Size of the relocation table, in bytes. |
michael@0 | 1277 | size_t |
michael@0 | 1278 | Assembler::jumpRelocationTableBytes() const |
michael@0 | 1279 | { |
michael@0 | 1280 | return jumpRelocations_.length(); |
michael@0 | 1281 | } |
michael@0 | 1282 | size_t |
michael@0 | 1283 | Assembler::dataRelocationTableBytes() const |
michael@0 | 1284 | { |
michael@0 | 1285 | return dataRelocations_.length(); |
michael@0 | 1286 | } |
michael@0 | 1287 | |
michael@0 | 1288 | size_t |
michael@0 | 1289 | Assembler::preBarrierTableBytes() const |
michael@0 | 1290 | { |
michael@0 | 1291 | return preBarriers_.length(); |
michael@0 | 1292 | } |
michael@0 | 1293 | |
michael@0 | 1294 | // Size of the data table, in bytes. |
michael@0 | 1295 | size_t |
michael@0 | 1296 | Assembler::bytesNeeded() const |
michael@0 | 1297 | { |
michael@0 | 1298 | return size() + |
michael@0 | 1299 | jumpRelocationTableBytes() + |
michael@0 | 1300 | dataRelocationTableBytes() + |
michael@0 | 1301 | preBarrierTableBytes(); |
michael@0 | 1302 | } |
michael@0 | 1303 | |
michael@0 | 1304 | // write a blob of binary into the instruction stream |
michael@0 | 1305 | BufferOffset |
michael@0 | 1306 | Assembler::writeInst(uint32_t x, uint32_t *dest) |
michael@0 | 1307 | { |
michael@0 | 1308 | if (dest == nullptr) |
michael@0 | 1309 | return m_buffer.putInt(x); |
michael@0 | 1310 | |
michael@0 | 1311 | writeInstStatic(x, dest); |
michael@0 | 1312 | return BufferOffset(); |
michael@0 | 1313 | } |
michael@0 | 1314 | void |
michael@0 | 1315 | Assembler::writeInstStatic(uint32_t x, uint32_t *dest) |
michael@0 | 1316 | { |
michael@0 | 1317 | JS_ASSERT(dest != nullptr); |
michael@0 | 1318 | *dest = x; |
michael@0 | 1319 | } |
michael@0 | 1320 | |
michael@0 | 1321 | BufferOffset |
michael@0 | 1322 | Assembler::align(int alignment) |
michael@0 | 1323 | { |
michael@0 | 1324 | BufferOffset ret; |
michael@0 | 1325 | if (alignment == 8) { |
michael@0 | 1326 | while (!m_buffer.isAligned(alignment)) { |
michael@0 | 1327 | BufferOffset tmp = as_nop(); |
michael@0 | 1328 | if (!ret.assigned()) |
michael@0 | 1329 | ret = tmp; |
michael@0 | 1330 | } |
michael@0 | 1331 | } else { |
michael@0 | 1332 | flush(); |
michael@0 | 1333 | JS_ASSERT((alignment & (alignment - 1)) == 0); |
michael@0 | 1334 | while (size() & (alignment-1)) { |
michael@0 | 1335 | BufferOffset tmp = as_nop(); |
michael@0 | 1336 | if (!ret.assigned()) |
michael@0 | 1337 | ret = tmp; |
michael@0 | 1338 | } |
michael@0 | 1339 | } |
michael@0 | 1340 | return ret; |
michael@0 | 1341 | |
michael@0 | 1342 | } |
michael@0 | 1343 | BufferOffset |
michael@0 | 1344 | Assembler::as_nop() |
michael@0 | 1345 | { |
michael@0 | 1346 | return writeInst(0xe320f000); |
michael@0 | 1347 | } |
michael@0 | 1348 | BufferOffset |
michael@0 | 1349 | Assembler::as_alu(Register dest, Register src1, Operand2 op2, |
michael@0 | 1350 | ALUOp op, SetCond_ sc, Condition c, Instruction *instdest) |
michael@0 | 1351 | { |
michael@0 | 1352 | return writeInst((int)op | (int)sc | (int) c | op2.encode() | |
michael@0 | 1353 | ((dest == InvalidReg) ? 0 : RD(dest)) | |
michael@0 | 1354 | ((src1 == InvalidReg) ? 0 : RN(src1)), (uint32_t*)instdest); |
michael@0 | 1355 | } |
michael@0 | 1356 | |
michael@0 | 1357 | BufferOffset |
michael@0 | 1358 | Assembler::as_mov(Register dest, Operand2 op2, SetCond_ sc, Condition c, Instruction *instdest) |
michael@0 | 1359 | { |
michael@0 | 1360 | return as_alu(dest, InvalidReg, op2, op_mov, sc, c, instdest); |
michael@0 | 1361 | } |
michael@0 | 1362 | |
michael@0 | 1363 | BufferOffset |
michael@0 | 1364 | Assembler::as_mvn(Register dest, Operand2 op2, SetCond_ sc, Condition c) |
michael@0 | 1365 | { |
michael@0 | 1366 | return as_alu(dest, InvalidReg, op2, op_mvn, sc, c); |
michael@0 | 1367 | } |
michael@0 | 1368 | |
michael@0 | 1369 | // Logical operations. |
michael@0 | 1370 | BufferOffset |
michael@0 | 1371 | Assembler::as_and(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) |
michael@0 | 1372 | { |
michael@0 | 1373 | return as_alu(dest, src1, op2, op_and, sc, c); |
michael@0 | 1374 | } |
michael@0 | 1375 | BufferOffset |
michael@0 | 1376 | Assembler::as_bic(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) |
michael@0 | 1377 | { |
michael@0 | 1378 | return as_alu(dest, src1, op2, op_bic, sc, c); |
michael@0 | 1379 | } |
michael@0 | 1380 | BufferOffset |
michael@0 | 1381 | Assembler::as_eor(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) |
michael@0 | 1382 | { |
michael@0 | 1383 | return as_alu(dest, src1, op2, op_eor, sc, c); |
michael@0 | 1384 | } |
michael@0 | 1385 | BufferOffset |
michael@0 | 1386 | Assembler::as_orr(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) |
michael@0 | 1387 | { |
michael@0 | 1388 | return as_alu(dest, src1, op2, op_orr, sc, c); |
michael@0 | 1389 | } |
michael@0 | 1390 | |
michael@0 | 1391 | // Mathematical operations. |
michael@0 | 1392 | BufferOffset |
michael@0 | 1393 | Assembler::as_adc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) |
michael@0 | 1394 | { |
michael@0 | 1395 | return as_alu(dest, src1, op2, op_adc, sc, c); |
michael@0 | 1396 | } |
michael@0 | 1397 | BufferOffset |
michael@0 | 1398 | Assembler::as_add(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) |
michael@0 | 1399 | { |
michael@0 | 1400 | return as_alu(dest, src1, op2, op_add, sc, c); |
michael@0 | 1401 | } |
michael@0 | 1402 | BufferOffset |
michael@0 | 1403 | Assembler::as_sbc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) |
michael@0 | 1404 | { |
michael@0 | 1405 | return as_alu(dest, src1, op2, op_sbc, sc, c); |
michael@0 | 1406 | } |
michael@0 | 1407 | BufferOffset |
michael@0 | 1408 | Assembler::as_sub(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) |
michael@0 | 1409 | { |
michael@0 | 1410 | return as_alu(dest, src1, op2, op_sub, sc, c); |
michael@0 | 1411 | } |
michael@0 | 1412 | BufferOffset |
michael@0 | 1413 | Assembler::as_rsb(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) |
michael@0 | 1414 | { |
michael@0 | 1415 | return as_alu(dest, src1, op2, op_rsb, sc, c); |
michael@0 | 1416 | } |
michael@0 | 1417 | BufferOffset |
michael@0 | 1418 | Assembler::as_rsc(Register dest, Register src1, Operand2 op2, SetCond_ sc, Condition c) |
michael@0 | 1419 | { |
michael@0 | 1420 | return as_alu(dest, src1, op2, op_rsc, sc, c); |
michael@0 | 1421 | } |
michael@0 | 1422 | |
michael@0 | 1423 | // Test operations. |
michael@0 | 1424 | BufferOffset |
michael@0 | 1425 | Assembler::as_cmn(Register src1, Operand2 op2, Condition c) |
michael@0 | 1426 | { |
michael@0 | 1427 | return as_alu(InvalidReg, src1, op2, op_cmn, SetCond, c); |
michael@0 | 1428 | } |
michael@0 | 1429 | BufferOffset |
michael@0 | 1430 | Assembler::as_cmp(Register src1, Operand2 op2, Condition c) |
michael@0 | 1431 | { |
michael@0 | 1432 | return as_alu(InvalidReg, src1, op2, op_cmp, SetCond, c); |
michael@0 | 1433 | } |
michael@0 | 1434 | BufferOffset |
michael@0 | 1435 | Assembler::as_teq(Register src1, Operand2 op2, Condition c) |
michael@0 | 1436 | { |
michael@0 | 1437 | return as_alu(InvalidReg, src1, op2, op_teq, SetCond, c); |
michael@0 | 1438 | } |
michael@0 | 1439 | BufferOffset |
michael@0 | 1440 | Assembler::as_tst(Register src1, Operand2 op2, Condition c) |
michael@0 | 1441 | { |
michael@0 | 1442 | return as_alu(InvalidReg, src1, op2, op_tst, SetCond, c); |
michael@0 | 1443 | } |
michael@0 | 1444 | |
michael@0 | 1445 | // Not quite ALU worthy, but useful nonetheless: |
michael@0 | 1446 | // these also have the issue of being formatted |
michael@0 | 1447 | // completely differently from the standard ALU operations. |
michael@0 | 1448 | BufferOffset |
michael@0 | 1449 | Assembler::as_movw(Register dest, Imm16 imm, Condition c, Instruction *pos) |
michael@0 | 1450 | { |
michael@0 | 1451 | JS_ASSERT(hasMOVWT()); |
michael@0 | 1452 | return writeInst(0x03000000 | c | imm.encode() | RD(dest), (uint32_t*)pos); |
michael@0 | 1453 | } |
michael@0 | 1454 | BufferOffset |
michael@0 | 1455 | Assembler::as_movt(Register dest, Imm16 imm, Condition c, Instruction *pos) |
michael@0 | 1456 | { |
michael@0 | 1457 | JS_ASSERT(hasMOVWT()); |
michael@0 | 1458 | return writeInst(0x03400000 | c | imm.encode() | RD(dest), (uint32_t*)pos); |
michael@0 | 1459 | } |
michael@0 | 1460 | |
michael@0 | 1461 | static const int mull_tag = 0x90; |
michael@0 | 1462 | |
michael@0 | 1463 | BufferOffset |
michael@0 | 1464 | Assembler::as_genmul(Register dhi, Register dlo, Register rm, Register rn, |
michael@0 | 1465 | MULOp op, SetCond_ sc, Condition c) |
michael@0 | 1466 | { |
michael@0 | 1467 | |
michael@0 | 1468 | return writeInst(RN(dhi) | maybeRD(dlo) | RM(rm) | rn.code() | op | sc | c | mull_tag); |
michael@0 | 1469 | } |
michael@0 | 1470 | BufferOffset |
michael@0 | 1471 | Assembler::as_mul(Register dest, Register src1, Register src2, SetCond_ sc, Condition c) |
michael@0 | 1472 | { |
michael@0 | 1473 | return as_genmul(dest, InvalidReg, src1, src2, opm_mul, sc, c); |
michael@0 | 1474 | } |
michael@0 | 1475 | BufferOffset |
michael@0 | 1476 | Assembler::as_mla(Register dest, Register acc, Register src1, Register src2, |
michael@0 | 1477 | SetCond_ sc, Condition c) |
michael@0 | 1478 | { |
michael@0 | 1479 | return as_genmul(dest, acc, src1, src2, opm_mla, sc, c); |
michael@0 | 1480 | } |
michael@0 | 1481 | BufferOffset |
michael@0 | 1482 | Assembler::as_umaal(Register destHI, Register destLO, Register src1, Register src2, Condition c) |
michael@0 | 1483 | { |
michael@0 | 1484 | return as_genmul(destHI, destLO, src1, src2, opm_umaal, NoSetCond, c); |
michael@0 | 1485 | } |
michael@0 | 1486 | BufferOffset |
michael@0 | 1487 | Assembler::as_mls(Register dest, Register acc, Register src1, Register src2, Condition c) |
michael@0 | 1488 | { |
michael@0 | 1489 | return as_genmul(dest, acc, src1, src2, opm_mls, NoSetCond, c); |
michael@0 | 1490 | } |
michael@0 | 1491 | |
michael@0 | 1492 | BufferOffset |
michael@0 | 1493 | Assembler::as_umull(Register destHI, Register destLO, Register src1, Register src2, |
michael@0 | 1494 | SetCond_ sc, Condition c) |
michael@0 | 1495 | { |
michael@0 | 1496 | return as_genmul(destHI, destLO, src1, src2, opm_umull, sc, c); |
michael@0 | 1497 | } |
michael@0 | 1498 | |
michael@0 | 1499 | BufferOffset |
michael@0 | 1500 | Assembler::as_umlal(Register destHI, Register destLO, Register src1, Register src2, |
michael@0 | 1501 | SetCond_ sc, Condition c) |
michael@0 | 1502 | { |
michael@0 | 1503 | return as_genmul(destHI, destLO, src1, src2, opm_umlal, sc, c); |
michael@0 | 1504 | } |
michael@0 | 1505 | |
michael@0 | 1506 | BufferOffset |
michael@0 | 1507 | Assembler::as_smull(Register destHI, Register destLO, Register src1, Register src2, |
michael@0 | 1508 | SetCond_ sc, Condition c) |
michael@0 | 1509 | { |
michael@0 | 1510 | return as_genmul(destHI, destLO, src1, src2, opm_smull, sc, c); |
michael@0 | 1511 | } |
michael@0 | 1512 | |
michael@0 | 1513 | BufferOffset |
michael@0 | 1514 | Assembler::as_smlal(Register destHI, Register destLO, Register src1, Register src2, |
michael@0 | 1515 | SetCond_ sc, Condition c) |
michael@0 | 1516 | { |
michael@0 | 1517 | return as_genmul(destHI, destLO, src1, src2, opm_smlal, sc, c); |
michael@0 | 1518 | } |
michael@0 | 1519 | |
michael@0 | 1520 | BufferOffset |
michael@0 | 1521 | Assembler::as_sdiv(Register rd, Register rn, Register rm, Condition c) |
michael@0 | 1522 | { |
michael@0 | 1523 | return writeInst(0x0710f010 | c | RN(rd) | RM(rm) | rn.code()); |
michael@0 | 1524 | } |
michael@0 | 1525 | |
michael@0 | 1526 | BufferOffset |
michael@0 | 1527 | Assembler::as_udiv(Register rd, Register rn, Register rm, Condition c) |
michael@0 | 1528 | { |
michael@0 | 1529 | return writeInst(0x0730f010 | c | RN(rd) | RM(rm) | rn.code()); |
michael@0 | 1530 | } |
michael@0 | 1531 | |
michael@0 | 1532 | // Data transfer instructions: ldr, str, ldrb, strb. |
michael@0 | 1533 | // Using an int to differentiate between 8 bits and 32 bits is |
michael@0 | 1534 | // overkill, but meh |
michael@0 | 1535 | BufferOffset |
michael@0 | 1536 | Assembler::as_dtr(LoadStore ls, int size, Index mode, |
michael@0 | 1537 | Register rt, DTRAddr addr, Condition c, uint32_t *dest) |
michael@0 | 1538 | { |
michael@0 | 1539 | JS_ASSERT (mode == Offset || (rt != addr.getBase() && pc != addr.getBase())); |
michael@0 | 1540 | JS_ASSERT(size == 32 || size == 8); |
michael@0 | 1541 | return writeInst( 0x04000000 | ls | (size == 8 ? 0x00400000 : 0) | mode | c | |
michael@0 | 1542 | RT(rt) | addr.encode(), dest); |
michael@0 | 1543 | |
michael@0 | 1544 | } |
michael@0 | 1545 | class PoolHintData { |
michael@0 | 1546 | public: |
michael@0 | 1547 | enum LoadType { |
michael@0 | 1548 | // set 0 to bogus, since that is the value most likely to be |
michael@0 | 1549 | // accidentally left somewhere. |
michael@0 | 1550 | poolBOGUS = 0, |
michael@0 | 1551 | poolDTR = 1, |
michael@0 | 1552 | poolBranch = 2, |
michael@0 | 1553 | poolVDTR = 3 |
michael@0 | 1554 | }; |
michael@0 | 1555 | |
michael@0 | 1556 | private: |
michael@0 | 1557 | uint32_t index : 16; |
michael@0 | 1558 | uint32_t cond : 4; |
michael@0 | 1559 | LoadType loadType : 2; |
michael@0 | 1560 | uint32_t destReg : 5; |
michael@0 | 1561 | uint32_t destType : 1; |
michael@0 | 1562 | uint32_t ONES : 4; |
michael@0 | 1563 | |
michael@0 | 1564 | static const uint32_t expectedOnes = 0xfu; |
michael@0 | 1565 | |
michael@0 | 1566 | public: |
michael@0 | 1567 | void init(uint32_t index_, Assembler::Condition cond_, LoadType lt, const Register &destReg_) { |
michael@0 | 1568 | index = index_; |
michael@0 | 1569 | JS_ASSERT(index == index_); |
michael@0 | 1570 | cond = cond_ >> 28; |
michael@0 | 1571 | JS_ASSERT(cond == cond_ >> 28); |
michael@0 | 1572 | loadType = lt; |
michael@0 | 1573 | ONES = expectedOnes; |
michael@0 | 1574 | destReg = destReg_.code(); |
michael@0 | 1575 | destType = 0; |
michael@0 | 1576 | } |
michael@0 | 1577 | void init(uint32_t index_, Assembler::Condition cond_, LoadType lt, const VFPRegister &destReg_) { |
michael@0 | 1578 | JS_ASSERT(destReg_.isFloat()); |
michael@0 | 1579 | index = index_; |
michael@0 | 1580 | JS_ASSERT(index == index_); |
michael@0 | 1581 | cond = cond_ >> 28; |
michael@0 | 1582 | JS_ASSERT(cond == cond_ >> 28); |
michael@0 | 1583 | loadType = lt; |
michael@0 | 1584 | ONES = expectedOnes; |
michael@0 | 1585 | destReg = destReg_.isDouble() ? destReg_.code() : destReg_.doubleOverlay().code(); |
michael@0 | 1586 | destType = destReg_.isDouble(); |
michael@0 | 1587 | } |
michael@0 | 1588 | Assembler::Condition getCond() { |
michael@0 | 1589 | return Assembler::Condition(cond << 28); |
michael@0 | 1590 | } |
michael@0 | 1591 | |
michael@0 | 1592 | Register getReg() { |
michael@0 | 1593 | return Register::FromCode(destReg); |
michael@0 | 1594 | } |
michael@0 | 1595 | VFPRegister getVFPReg() { |
michael@0 | 1596 | VFPRegister r = VFPRegister(FloatRegister::FromCode(destReg)); |
michael@0 | 1597 | return destType ? r : r.singleOverlay(); |
michael@0 | 1598 | } |
michael@0 | 1599 | |
michael@0 | 1600 | int32_t getIndex() { |
michael@0 | 1601 | return index; |
michael@0 | 1602 | } |
michael@0 | 1603 | void setIndex(uint32_t index_) { |
michael@0 | 1604 | JS_ASSERT(ONES == expectedOnes && loadType != poolBOGUS); |
michael@0 | 1605 | index = index_; |
michael@0 | 1606 | JS_ASSERT(index == index_); |
michael@0 | 1607 | } |
michael@0 | 1608 | |
michael@0 | 1609 | LoadType getLoadType() { |
michael@0 | 1610 | // If this *was* a poolBranch, but the branch has already been bound |
michael@0 | 1611 | // then this isn't going to look like a real poolhintdata, but we still |
michael@0 | 1612 | // want to lie about it so everyone knows it *used* to be a branch. |
michael@0 | 1613 | if (ONES != expectedOnes) |
michael@0 | 1614 | return PoolHintData::poolBranch; |
michael@0 | 1615 | return loadType; |
michael@0 | 1616 | } |
michael@0 | 1617 | |
michael@0 | 1618 | bool isValidPoolHint() { |
michael@0 | 1619 | // Most instructions cannot have a condition that is 0xf. Notable exceptions are |
michael@0 | 1620 | // blx and the entire NEON instruction set. For the purposes of pool loads, and |
michael@0 | 1621 | // possibly patched branches, the possible instructions are ldr and b, neither of |
michael@0 | 1622 | // which can have a condition code of 0xf. |
michael@0 | 1623 | return ONES == expectedOnes; |
michael@0 | 1624 | } |
michael@0 | 1625 | }; |
michael@0 | 1626 | |
michael@0 | 1627 | union PoolHintPun { |
michael@0 | 1628 | PoolHintData phd; |
michael@0 | 1629 | uint32_t raw; |
michael@0 | 1630 | }; |
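// A simplified standalone sketch of the trick PoolHintData uses: pack a pool-entry index,
// a condition and a destination register into one 32-bit word, with the top nibble forced
// to all ones so the word cannot be mistaken for a normally-conditioned ldr or b. Field
// names and widths here are illustrative; like the real class, this relies on the compiler
// packing the bitfields (LSB first) into a single 32-bit word.
#include <cassert>
#include <cstdint>
#include <cstring>

struct HintData {
    uint32_t index : 16;   // which pool entry this load refers to
    uint32_t cond  : 4;    // condition code, shifted down from bits 31:28
    uint32_t type  : 2;    // what kind of load to materialize later
    uint32_t reg   : 6;    // destination register
    uint32_t ones  : 4;    // 0xf marker: not a valid condition for ldr/b
};
static_assert(sizeof(HintData) == sizeof(uint32_t), "must fit in one instruction slot");

static uint32_t
packHint(uint32_t index, uint32_t cond, uint32_t type, uint32_t reg)
{
    HintData d;
    d.index = index;
    d.cond  = cond;
    d.type  = type;
    d.reg   = reg;
    d.ones  = 0xf;
    uint32_t raw;
    memcpy(&raw, &d, sizeof(raw));   // same idea as the PoolHintPun union
    return raw;
}

static bool
looksLikeHint(uint32_t raw)
{
    HintData d;
    memcpy(&d, &raw, sizeof(d));
    return d.ones == 0xf;            // cf. PoolHintData::isValidPoolHint()
}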
michael@0 | 1631 | |
michael@0 | 1632 | // Handles all of the other integral data transferring functions: |
michael@0 | 1633 | // ldrsb, ldrsh, ldrd, etc. |
michael@0 | 1634 | // size is given in bits. |
michael@0 | 1635 | BufferOffset |
michael@0 | 1636 | Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned, Index mode, |
michael@0 | 1637 | Register rt, EDtrAddr addr, Condition c, uint32_t *dest) |
michael@0 | 1638 | { |
michael@0 | 1639 | int extra_bits2 = 0; |
michael@0 | 1640 | int extra_bits1 = 0; |
michael@0 | 1641 | switch(size) { |
michael@0 | 1642 | case 8: |
michael@0 | 1643 | JS_ASSERT(IsSigned); |
michael@0 | 1644 | JS_ASSERT(ls!=IsStore); |
michael@0 | 1645 | extra_bits1 = 0x1; |
michael@0 | 1646 | extra_bits2 = 0x2; |
michael@0 | 1647 | break; |
michael@0 | 1648 | case 16: |
michael@0 | 1649 | //case 32: |
michael@0 | 1650 |         // doesn't need to be handled; it is handled by the default ldr/str. |
michael@0 | 1651 | extra_bits2 = 0x01; |
michael@0 | 1652 | extra_bits1 = (ls == IsStore) ? 0 : 1; |
michael@0 | 1653 | if (IsSigned) { |
michael@0 | 1654 | JS_ASSERT(ls != IsStore); |
michael@0 | 1655 | extra_bits2 |= 0x2; |
michael@0 | 1656 | } |
michael@0 | 1657 | break; |
michael@0 | 1658 | case 64: |
michael@0 | 1659 | extra_bits2 = (ls == IsStore) ? 0x3 : 0x2; |
michael@0 | 1660 | extra_bits1 = 0; |
michael@0 | 1661 | break; |
michael@0 | 1662 | default: |
michael@0 | 1663 | MOZ_ASSUME_UNREACHABLE("SAY WHAT?"); |
michael@0 | 1664 | } |
michael@0 | 1665 | return writeInst(extra_bits2 << 5 | extra_bits1 << 20 | 0x90 | |
michael@0 | 1666 | addr.encode() | RT(rt) | mode | c, dest); |
michael@0 | 1667 | } |
michael@0 | 1668 | |
michael@0 | 1669 | BufferOffset |
michael@0 | 1670 | Assembler::as_dtm(LoadStore ls, Register rn, uint32_t mask, |
michael@0 | 1671 | DTMMode mode, DTMWriteBack wb, Condition c) |
michael@0 | 1672 | { |
michael@0 | 1673 | return writeInst(0x08000000 | RN(rn) | ls | |
michael@0 | 1674 | mode | mask | c | wb); |
michael@0 | 1675 | } |
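// A standalone sketch of the `mask` argument above: ldm/stm encode their register list
// as a 16-bit mask with bit i set for register ri, so a caller builds it by or-ing in one
// bit per register. The helper below is illustrative only.
#include <cstdint>

static uint32_t
registerListMask(const uint8_t *regs, int count)
{
    uint32_t mask = 0;
    for (int i = 0; i < count; i++)
        mask |= uint32_t(1) << regs[i];   // regs[i] is a core register number 0..15
    return mask;
}

// E.g. {r4, r5, lr(=r14)} gives mask 0x4030.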
michael@0 | 1676 | |
michael@0 | 1677 | BufferOffset |
michael@0 | 1678 | Assembler::as_Imm32Pool(Register dest, uint32_t value, Condition c) |
michael@0 | 1679 | { |
michael@0 | 1680 | PoolHintPun php; |
michael@0 | 1681 | php.phd.init(0, c, PoolHintData::poolDTR, dest); |
michael@0 | 1682 | return m_buffer.insertEntry(4, (uint8_t*)&php.raw, int32Pool, (uint8_t*)&value); |
michael@0 | 1683 | } |
michael@0 | 1684 | |
michael@0 | 1685 | void |
michael@0 | 1686 | Assembler::as_WritePoolEntry(Instruction *addr, Condition c, uint32_t data) |
michael@0 | 1687 | { |
michael@0 | 1688 | JS_ASSERT(addr->is<InstLDR>()); |
michael@0 | 1689 | int32_t offset = addr->encode() & 0xfff; |
michael@0 | 1690 | if ((addr->encode() & IsUp) != IsUp) |
michael@0 | 1691 | offset = -offset; |
michael@0 | 1692 | char * rawAddr = reinterpret_cast<char*>(addr); |
michael@0 | 1693 | uint32_t * dest = reinterpret_cast<uint32_t*>(&rawAddr[offset + 8]); |
michael@0 | 1694 | *dest = data; |
michael@0 | 1695 | Condition orig_cond; |
michael@0 | 1696 | addr->extractCond(&orig_cond); |
michael@0 | 1697 | JS_ASSERT(orig_cond == c); |
michael@0 | 1698 | } |
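// A standalone sketch of the address arithmetic above: in ARM state the pc reads as the
// instruction's address plus 8, so an |ldr rd, [pc, #imm]| at address A accesses
// A + 8 + imm (or A + 8 - imm when the U bit is clear). as_WritePoolEntry recomputes that
// address to poke a new value into the pool slot. Helper names are purely illustrative.
#include <cstdint>

static uintptr_t
literalLoadTarget(uintptr_t instAddr, uint32_t imm12, bool up)
{
    uintptr_t pc = instAddr + 8;                 // ARM-state pc bias
    return up ? pc + imm12 : pc - imm12;
}

// E.g. an ldr at 0x1000 with imm12 == 0x20 and the U bit set loads from 0x1028.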
michael@0 | 1699 | |
michael@0 | 1700 | BufferOffset |
michael@0 | 1701 | Assembler::as_BranchPool(uint32_t value, RepatchLabel *label, ARMBuffer::PoolEntry *pe, Condition c) |
michael@0 | 1702 | { |
michael@0 | 1703 | PoolHintPun php; |
michael@0 | 1704 | php.phd.init(0, c, PoolHintData::poolBranch, pc); |
michael@0 | 1705 | m_buffer.markNextAsBranch(); |
michael@0 | 1706 | BufferOffset ret = m_buffer.insertEntry(4, (uint8_t*)&php.raw, int32Pool, (uint8_t*)&value, pe); |
michael@0 | 1707 | // If this label is already bound, then immediately replace the stub load with |
michael@0 | 1708 | // a correct branch. |
michael@0 | 1709 | if (label->bound()) { |
michael@0 | 1710 | BufferOffset dest(label); |
michael@0 | 1711 | as_b(dest.diffB<BOffImm>(ret), c, ret); |
michael@0 | 1712 | } else { |
michael@0 | 1713 | label->use(ret.getOffset()); |
michael@0 | 1714 | } |
michael@0 | 1715 | return ret; |
michael@0 | 1716 | } |
michael@0 | 1717 | |
michael@0 | 1718 | BufferOffset |
michael@0 | 1719 | Assembler::as_FImm64Pool(VFPRegister dest, double value, Condition c) |
michael@0 | 1720 | { |
michael@0 | 1721 | JS_ASSERT(dest.isDouble()); |
michael@0 | 1722 | PoolHintPun php; |
michael@0 | 1723 | php.phd.init(0, c, PoolHintData::poolVDTR, dest); |
michael@0 | 1724 | return m_buffer.insertEntry(4, (uint8_t*)&php.raw, doublePool, (uint8_t*)&value); |
michael@0 | 1725 | } |
michael@0 | 1726 | |
michael@0 | 1727 | struct PaddedFloat32 |
michael@0 | 1728 | { |
michael@0 | 1729 | float value; |
michael@0 | 1730 | uint32_t padding; |
michael@0 | 1731 | }; |
michael@0 | 1732 | JS_STATIC_ASSERT(sizeof(PaddedFloat32) == sizeof(double)); |
michael@0 | 1733 | |
michael@0 | 1734 | BufferOffset |
michael@0 | 1735 | Assembler::as_FImm32Pool(VFPRegister dest, float value, Condition c) |
michael@0 | 1736 | { |
michael@0 | 1737 | /* |
michael@0 | 1738 | * Insert floats into the double pool as they have the same limitations on |
michael@0 | 1739 | * immediate offset. This wastes 4 bytes padding per float. An alternative |
michael@0 | 1740 | * would be to have a separate pool for floats. |
michael@0 | 1741 | */ |
michael@0 | 1742 | JS_ASSERT(dest.isSingle()); |
michael@0 | 1743 | PoolHintPun php; |
michael@0 | 1744 | php.phd.init(0, c, PoolHintData::poolVDTR, dest); |
michael@0 | 1745 | PaddedFloat32 pf = { value, 0 }; |
michael@0 | 1746 | return m_buffer.insertEntry(4, (uint8_t*)&php.raw, doublePool, (uint8_t*)&pf); |
michael@0 | 1747 | } |
michael@0 | 1748 | |
michael@0 | 1749 | // Pool callbacks stuff: |
michael@0 | 1750 | void |
michael@0 | 1751 | Assembler::insertTokenIntoTag(uint32_t instSize, uint8_t *load_, int32_t token) |
michael@0 | 1752 | { |
michael@0 | 1753 | uint32_t *load = (uint32_t*) load_; |
michael@0 | 1754 | PoolHintPun php; |
michael@0 | 1755 | php.raw = *load; |
michael@0 | 1756 | php.phd.setIndex(token); |
michael@0 | 1757 | *load = php.raw; |
michael@0 | 1758 | } |
michael@0 | 1759 | // patchConstantPoolLoad takes the address of the instruction that wants to be patched, and |
michael@0 | 1760 | // the address of the start of the constant pool, and figures things out from there. |
michael@0 | 1761 | bool |
michael@0 | 1762 | Assembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr) |
michael@0 | 1763 | { |
michael@0 | 1764 | PoolHintData data = *(PoolHintData*)loadAddr; |
michael@0 | 1765 | uint32_t *instAddr = (uint32_t*) loadAddr; |
michael@0 | 1766 | int offset = (char *)constPoolAddr - (char *)loadAddr; |
michael@0 | 1767 | switch(data.getLoadType()) { |
michael@0 | 1768 | case PoolHintData::poolBOGUS: |
michael@0 | 1769 | MOZ_ASSUME_UNREACHABLE("bogus load type!"); |
michael@0 | 1770 | case PoolHintData::poolDTR: |
michael@0 | 1771 | dummy->as_dtr(IsLoad, 32, Offset, data.getReg(), |
michael@0 | 1772 | DTRAddr(pc, DtrOffImm(offset+4*data.getIndex() - 8)), data.getCond(), instAddr); |
michael@0 | 1773 | break; |
michael@0 | 1774 | case PoolHintData::poolBranch: |
michael@0 | 1775 |         // Either this used to be a poolBranch and the label was already bound, so it was |
michael@0 | 1776 |         // replaced with a real branch, or this may happen in the future. |
michael@0 | 1777 |         // If it is going to happen in the future, then the actual bits that are written here |
michael@0 | 1778 |         // don't matter (except the condition code, since that is always preserved across |
michael@0 | 1779 |         // patchings), but if the label does not get bound later, |
michael@0 | 1780 |         // then we want to make sure this is a load from the pool entry (and the pool entry |
michael@0 | 1781 |         // should be nullptr so it will crash). |
michael@0 | 1782 | if (data.isValidPoolHint()) { |
michael@0 | 1783 | dummy->as_dtr(IsLoad, 32, Offset, pc, |
michael@0 | 1784 | DTRAddr(pc, DtrOffImm(offset+4*data.getIndex() - 8)), |
michael@0 | 1785 | data.getCond(), instAddr); |
michael@0 | 1786 | } |
michael@0 | 1787 | break; |
michael@0 | 1788 | case PoolHintData::poolVDTR: { |
michael@0 | 1789 | VFPRegister dest = data.getVFPReg(); |
michael@0 | 1790 | int32_t imm = offset + (8 * data.getIndex()) - 8; |
michael@0 | 1791 | if (imm < -1023 || imm > 1023) |
michael@0 | 1792 | return false; |
michael@0 | 1793 | dummy->as_vdtr(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)), data.getCond(), instAddr); |
michael@0 | 1794 | break; |
michael@0 | 1795 | } |
michael@0 | 1796 | } |
michael@0 | 1797 | return true; |
michael@0 | 1798 | } |
michael@0 | 1799 | |
michael@0 | 1800 | uint32_t |
michael@0 | 1801 | Assembler::placeConstantPoolBarrier(int offset) |
michael@0 | 1802 | { |
michael@0 | 1803 | // BUG: 700526 |
michael@0 | 1804 |     // This is still an active path; however, we do not hit it in the test |
michael@0 | 1805 | // suite at all. |
michael@0 | 1806 | MOZ_ASSUME_UNREACHABLE("ARMAssembler holdover"); |
michael@0 | 1807 | } |
michael@0 | 1808 | |
michael@0 | 1809 | // Control flow stuff: |
michael@0 | 1810 | |
michael@0 | 1811 | // bx can *only* branch to a register, |
michael@0 | 1812 | // never to an immediate. |
michael@0 | 1813 | BufferOffset |
michael@0 | 1814 | Assembler::as_bx(Register r, Condition c, bool isPatchable) |
michael@0 | 1815 | { |
michael@0 | 1816 | BufferOffset ret = writeInst(((int) c) | op_bx | r.code()); |
michael@0 | 1817 | if (c == Always && !isPatchable) |
michael@0 | 1818 | m_buffer.markGuard(); |
michael@0 | 1819 | return ret; |
michael@0 | 1820 | } |
michael@0 | 1821 | void |
michael@0 | 1822 | Assembler::writePoolGuard(BufferOffset branch, Instruction *dest, BufferOffset afterPool) |
michael@0 | 1823 | { |
michael@0 | 1824 | BOffImm off = afterPool.diffB<BOffImm>(branch); |
michael@0 | 1825 | *dest = InstBImm(off, Always); |
michael@0 | 1826 | } |
michael@0 | 1827 | // Branch can branch to an immediate *or* to a register. |
michael@0 | 1828 | // Branches to immediates are pc-relative; branches to registers |
michael@0 | 1829 | // are absolute. |
michael@0 | 1830 | BufferOffset |
michael@0 | 1831 | Assembler::as_b(BOffImm off, Condition c, bool isPatchable) |
michael@0 | 1832 | { |
michael@0 | 1833 | m_buffer.markNextAsBranch(); |
michael@0 | 1834 |     BufferOffset ret = writeInst(((int)c) | op_b | off.encode()); |
michael@0 | 1835 | if (c == Always && !isPatchable) |
michael@0 | 1836 | m_buffer.markGuard(); |
michael@0 | 1837 | return ret; |
michael@0 | 1838 | } |
michael@0 | 1839 | |
michael@0 | 1840 | BufferOffset |
michael@0 | 1841 | Assembler::as_b(Label *l, Condition c, bool isPatchable) |
michael@0 | 1842 | { |
michael@0 | 1843 | if (m_buffer.oom()) { |
michael@0 | 1844 | BufferOffset ret; |
michael@0 | 1845 | return ret; |
michael@0 | 1846 | } |
michael@0 | 1847 | m_buffer.markNextAsBranch(); |
michael@0 | 1848 | if (l->bound()) { |
michael@0 | 1849 | BufferOffset ret = as_nop(); |
michael@0 | 1850 | as_b(BufferOffset(l).diffB<BOffImm>(ret), c, ret); |
michael@0 | 1851 | return ret; |
michael@0 | 1852 | } |
michael@0 | 1853 | |
michael@0 | 1854 | int32_t old; |
michael@0 | 1855 | BufferOffset ret; |
michael@0 | 1856 | if (l->used()) { |
michael@0 | 1857 | old = l->offset(); |
michael@0 | 1858 |         // If we cannot actually encode the offset of the branch, bail out |
michael@0 | 1859 |         // of assembly by marking the buffer as failed. |
michael@0 | 1860 | if (!BOffImm::isInRange(old)) { |
michael@0 | 1861 | m_buffer.fail_bail(); |
michael@0 | 1862 | return ret; |
michael@0 | 1863 | } |
michael@0 | 1864 | ret = as_b(BOffImm(old), c, isPatchable); |
michael@0 | 1865 | } else { |
michael@0 | 1866 | old = LabelBase::INVALID_OFFSET; |
michael@0 | 1867 | BOffImm inv; |
michael@0 | 1868 | ret = as_b(inv, c, isPatchable); |
michael@0 | 1869 | } |
michael@0 | 1870 | DebugOnly<int32_t> check = l->use(ret.getOffset()); |
michael@0 | 1871 | JS_ASSERT(check == old); |
michael@0 | 1872 | return ret; |
michael@0 | 1873 | } |
michael@0 | 1874 | BufferOffset |
michael@0 | 1875 | Assembler::as_b(BOffImm off, Condition c, BufferOffset inst) |
michael@0 | 1876 | { |
michael@0 | 1877 | *editSrc(inst) = InstBImm(off, c); |
michael@0 | 1878 | return inst; |
michael@0 | 1879 | } |
michael@0 | 1880 | |
michael@0 | 1881 | // blx can go to either an immediate or a register. |
michael@0 | 1882 | // When blx'ing to a register, we change processor state |
michael@0 | 1883 | // depending on the low bit of the register; |
michael@0 | 1884 | // when blx'ing to an immediate, we *always* change processor state. |
michael@0 | 1885 | |
michael@0 | 1886 | BufferOffset |
michael@0 | 1887 | Assembler::as_blx(Register r, Condition c) |
michael@0 | 1888 | { |
michael@0 | 1889 | return writeInst(((int) c) | op_blx | r.code()); |
michael@0 | 1890 | } |
michael@0 | 1891 | |
michael@0 | 1892 | // bl can only branch to a pc-relative immediate offset. |
michael@0 | 1893 | // It cannot change the processor state. |
michael@0 | 1894 | BufferOffset |
michael@0 | 1895 | Assembler::as_bl(BOffImm off, Condition c) |
michael@0 | 1896 | { |
michael@0 | 1897 | m_buffer.markNextAsBranch(); |
michael@0 | 1898 | return writeInst(((int)c) | op_bl | off.encode()); |
michael@0 | 1899 | } |
michael@0 | 1900 | |
michael@0 | 1901 | BufferOffset |
michael@0 | 1902 | Assembler::as_bl(Label *l, Condition c) |
michael@0 | 1903 | { |
michael@0 | 1904 | if (m_buffer.oom()) { |
michael@0 | 1905 | BufferOffset ret; |
michael@0 | 1906 | return ret; |
michael@0 | 1907 | } |
michael@0 | 1908 | m_buffer.markNextAsBranch(); |
michael@0 | 1909 | if (l->bound()) { |
michael@0 | 1910 | BufferOffset ret = as_nop(); |
michael@0 | 1911 | as_bl(BufferOffset(l).diffB<BOffImm>(ret), c, ret); |
michael@0 | 1912 | return ret; |
michael@0 | 1913 | } |
michael@0 | 1914 | |
michael@0 | 1915 | int32_t old; |
michael@0 | 1916 | BufferOffset ret; |
michael@0 | 1917 | // See if the list was empty :( |
michael@0 | 1918 | if (l->used()) { |
michael@0 | 1919 |         // If we cannot actually encode the offset of the branch, bail out |
michael@0 | 1920 |         // of assembly by marking the buffer as failed. |
michael@0 | 1921 | old = l->offset(); |
michael@0 | 1922 | if (!BOffImm::isInRange(old)) { |
michael@0 | 1923 | m_buffer.fail_bail(); |
michael@0 | 1924 | return ret; |
michael@0 | 1925 | } |
michael@0 | 1926 | ret = as_bl(BOffImm(old), c); |
michael@0 | 1927 | } else { |
michael@0 | 1928 | old = LabelBase::INVALID_OFFSET; |
michael@0 | 1929 | BOffImm inv; |
michael@0 | 1930 | ret = as_bl(inv, c); |
michael@0 | 1931 | } |
michael@0 | 1932 | DebugOnly<int32_t> check = l->use(ret.getOffset()); |
michael@0 | 1933 | JS_ASSERT(check == old); |
michael@0 | 1934 | return ret; |
michael@0 | 1935 | } |
michael@0 | 1936 | BufferOffset |
michael@0 | 1937 | Assembler::as_bl(BOffImm off, Condition c, BufferOffset inst) |
michael@0 | 1938 | { |
michael@0 | 1939 | *editSrc(inst) = InstBLImm(off, c); |
michael@0 | 1940 | return inst; |
michael@0 | 1941 | } |
michael@0 | 1942 | |
michael@0 | 1943 | BufferOffset |
michael@0 | 1944 | Assembler::as_mrs(Register r, Condition c) |
michael@0 | 1945 | { |
michael@0 | 1946 | return writeInst(0x010f0000 | int(c) | RD(r)); |
michael@0 | 1947 | } |
michael@0 | 1948 | |
michael@0 | 1949 | BufferOffset |
michael@0 | 1950 | Assembler::as_msr(Register r, Condition c) |
michael@0 | 1951 | { |
michael@0 | 1952 |     // Hardcode the 'mask' field to 0b11 for now; it is bits 18 and 19, which are the two high bits of the 'c' in this constant. |
michael@0 | 1953 | JS_ASSERT((r.code() & ~0xf) == 0); |
michael@0 | 1954 | return writeInst(0x012cf000 | int(c) | r.code()); |
michael@0 | 1955 | } |
michael@0 | 1956 | |
michael@0 | 1957 | // VFP instructions! |
michael@0 | 1958 | enum vfp_tags { |
michael@0 | 1959 | vfp_tag = 0x0C000A00, |
michael@0 | 1960 | vfp_arith = 0x02000000 |
michael@0 | 1961 | }; |
michael@0 | 1962 | BufferOffset |
michael@0 | 1963 | Assembler::writeVFPInst(vfp_size sz, uint32_t blob, uint32_t *dest) |
michael@0 | 1964 | { |
michael@0 | 1965 | JS_ASSERT((sz & blob) == 0); |
michael@0 | 1966 | JS_ASSERT((vfp_tag & blob) == 0); |
michael@0 | 1967 | return writeInst(vfp_tag | sz | blob, dest); |
michael@0 | 1968 | } |
michael@0 | 1969 | |
michael@0 | 1970 | // Unityped variants: all registers hold the same type (IEEE 754 single/double). |
michael@0 | 1971 | // Notably not included are vcvt; vmov vd, #imm; vmov rt, vn. |
michael@0 | 1972 | BufferOffset |
michael@0 | 1973 | Assembler::as_vfp_float(VFPRegister vd, VFPRegister vn, VFPRegister vm, |
michael@0 | 1974 | VFPOp op, Condition c) |
michael@0 | 1975 | { |
michael@0 | 1976 | // Make sure we believe that all of our operands are the same kind |
michael@0 | 1977 | JS_ASSERT_IF(!vn.isMissing(), vd.equiv(vn)); |
michael@0 | 1978 | JS_ASSERT_IF(!vm.isMissing(), vd.equiv(vm)); |
michael@0 | 1979 | vfp_size sz = vd.isDouble() ? isDouble : isSingle; |
michael@0 | 1980 | return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | vfp_arith | c); |
michael@0 | 1981 | } |
michael@0 | 1982 | |
michael@0 | 1983 | BufferOffset |
michael@0 | 1984 | Assembler::as_vadd(VFPRegister vd, VFPRegister vn, VFPRegister vm, |
michael@0 | 1985 | Condition c) |
michael@0 | 1986 | { |
michael@0 | 1987 | return as_vfp_float(vd, vn, vm, opv_add, c); |
michael@0 | 1988 | } |
michael@0 | 1989 | |
michael@0 | 1990 | BufferOffset |
michael@0 | 1991 | Assembler::as_vdiv(VFPRegister vd, VFPRegister vn, VFPRegister vm, |
michael@0 | 1992 | Condition c) |
michael@0 | 1993 | { |
michael@0 | 1994 | return as_vfp_float(vd, vn, vm, opv_div, c); |
michael@0 | 1995 | } |
michael@0 | 1996 | |
michael@0 | 1997 | BufferOffset |
michael@0 | 1998 | Assembler::as_vmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, |
michael@0 | 1999 | Condition c) |
michael@0 | 2000 | { |
michael@0 | 2001 | return as_vfp_float(vd, vn, vm, opv_mul, c); |
michael@0 | 2002 | } |
michael@0 | 2003 | |
michael@0 | 2004 | BufferOffset |
michael@0 | 2005 | Assembler::as_vnmul(VFPRegister vd, VFPRegister vn, VFPRegister vm, |
michael@0 | 2006 | Condition c) |
michael@0 | 2007 | { |
michael@0 | 2008 | return as_vfp_float(vd, vn, vm, opv_mul, c); |
michael@0 | 2009 | MOZ_ASSUME_UNREACHABLE("Feature NYI"); |
michael@0 | 2010 | } |
michael@0 | 2011 | |
michael@0 | 2012 | BufferOffset |
michael@0 | 2013 | Assembler::as_vnmla(VFPRegister vd, VFPRegister vn, VFPRegister vm, |
michael@0 | 2014 | Condition c) |
michael@0 | 2015 | { |
michael@0 | 2016 | MOZ_ASSUME_UNREACHABLE("Feature NYI"); |
michael@0 | 2017 | } |
michael@0 | 2018 | |
michael@0 | 2019 | BufferOffset |
michael@0 | 2020 | Assembler::as_vnmls(VFPRegister vd, VFPRegister vn, VFPRegister vm, |
michael@0 | 2021 | Condition c) |
michael@0 | 2022 | { |
michael@0 | 2023 | MOZ_ASSUME_UNREACHABLE("Feature NYI"); |
michael@0 | 2024 | return BufferOffset(); |
michael@0 | 2025 | } |
michael@0 | 2026 | |
michael@0 | 2027 | BufferOffset |
michael@0 | 2028 | Assembler::as_vneg(VFPRegister vd, VFPRegister vm, Condition c) |
michael@0 | 2029 | { |
michael@0 | 2030 | return as_vfp_float(vd, NoVFPRegister, vm, opv_neg, c); |
michael@0 | 2031 | } |
michael@0 | 2032 | |
michael@0 | 2033 | BufferOffset |
michael@0 | 2034 | Assembler::as_vsqrt(VFPRegister vd, VFPRegister vm, Condition c) |
michael@0 | 2035 | { |
michael@0 | 2036 | return as_vfp_float(vd, NoVFPRegister, vm, opv_sqrt, c); |
michael@0 | 2037 | } |
michael@0 | 2038 | |
michael@0 | 2039 | BufferOffset |
michael@0 | 2040 | Assembler::as_vabs(VFPRegister vd, VFPRegister vm, Condition c) |
michael@0 | 2041 | { |
michael@0 | 2042 | return as_vfp_float(vd, NoVFPRegister, vm, opv_abs, c); |
michael@0 | 2043 | } |
michael@0 | 2044 | |
michael@0 | 2045 | BufferOffset |
michael@0 | 2046 | Assembler::as_vsub(VFPRegister vd, VFPRegister vn, VFPRegister vm, |
michael@0 | 2047 | Condition c) |
michael@0 | 2048 | { |
michael@0 | 2049 | return as_vfp_float(vd, vn, vm, opv_sub, c); |
michael@0 | 2050 | } |
michael@0 | 2051 | |
michael@0 | 2052 | BufferOffset |
michael@0 | 2053 | Assembler::as_vcmp(VFPRegister vd, VFPRegister vm, |
michael@0 | 2054 | Condition c) |
michael@0 | 2055 | { |
michael@0 | 2056 | return as_vfp_float(vd, NoVFPRegister, vm, opv_cmp, c); |
michael@0 | 2057 | } |
michael@0 | 2058 | BufferOffset |
michael@0 | 2059 | Assembler::as_vcmpz(VFPRegister vd, Condition c) |
michael@0 | 2060 | { |
michael@0 | 2061 | return as_vfp_float(vd, NoVFPRegister, NoVFPRegister, opv_cmpz, c); |
michael@0 | 2062 | } |
michael@0 | 2063 | |
michael@0 | 2064 | // Specifically, a move between two same-sized registers. |
michael@0 | 2065 | BufferOffset |
michael@0 | 2066 | Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c) |
michael@0 | 2067 | { |
michael@0 | 2068 | return as_vfp_float(vd, NoVFPRegister, vsrc, opv_mov, c); |
michael@0 | 2069 | } |
michael@0 | 2070 | // xfer between core and VFP registers. |
michael@0 | 2071 | |
michael@0 | 2072 | // Unlike the next function, moving between the core registers and VFP |
michael@0 | 2073 | // registers can't be typed quite as properly, since I don't want to |
michael@0 | 2074 | // munge the type VFPRegister to also include core registers. Thus, the core |
michael@0 | 2075 | // and VFP registers are passed in based on their type, and src/dest is |
michael@0 | 2076 | // determined by the float2core argument. |
michael@0 | 2077 | |
michael@0 | 2078 | BufferOffset |
michael@0 | 2079 | Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm, FloatToCore_ f2c, |
michael@0 | 2080 | Condition c, int idx) |
michael@0 | 2081 | { |
michael@0 | 2082 | vfp_size sz = isSingle; |
michael@0 | 2083 | if (vm.isDouble()) { |
michael@0 | 2084 |         // Technically, this can be done with a vmov à la ARM ARM under vmov; |
michael@0 | 2085 |         // however, that requires at least an extra bit saying if the |
michael@0 | 2086 |         // operation should be performed on the lower or upper half of the |
michael@0 | 2087 |         // double. Moving a single to/from 2N/2N+1 isn't equivalent either, |
michael@0 | 2088 |         // since there are 32 single registers and 32 double registers, |
michael@0 | 2089 |         // so there is no way to encode the last 16 double registers. |
michael@0 | 2090 | sz = isDouble; |
michael@0 | 2091 | JS_ASSERT(idx == 0 || idx == 1); |
michael@0 | 2092 | // If we are transferring a single half of the double |
michael@0 | 2093 | // then it must be moving a VFP reg to a core reg. |
michael@0 | 2094 | if (vt2 == InvalidReg) |
michael@0 | 2095 | JS_ASSERT(f2c == FloatToCore); |
michael@0 | 2096 | idx = idx << 21; |
michael@0 | 2097 | } else { |
michael@0 | 2098 | JS_ASSERT(idx == 0); |
michael@0 | 2099 | } |
michael@0 | 2100 | VFPXferSize xfersz = WordTransfer; |
michael@0 | 2101 | uint32_t (*encodeVFP)(VFPRegister) = VN; |
michael@0 | 2102 | if (vt2 != InvalidReg) { |
michael@0 | 2103 | // We are doing a 64 bit transfer. |
michael@0 | 2104 | xfersz = DoubleTransfer; |
michael@0 | 2105 | encodeVFP = VM; |
michael@0 | 2106 | } |
michael@0 | 2107 | |
michael@0 | 2108 | return writeVFPInst(sz, xfersz | f2c | c | |
michael@0 | 2109 | RT(vt1) | maybeRN(vt2) | encodeVFP(vm) | idx); |
michael@0 | 2110 | } |
michael@0 | 2111 | enum vcvt_destFloatness { |
michael@0 | 2112 | toInteger = 1 << 18, |
michael@0 | 2113 | toFloat = 0 << 18 |
michael@0 | 2114 | }; |
michael@0 | 2115 | enum vcvt_toZero { |
michael@0 | 2116 |     toZero = 1 << 7, // round towards zero (truncate) |
michael@0 | 2117 | toFPSCR = 0 << 7 // use whatever rounding mode the fpscr specifies |
michael@0 | 2118 | }; |
michael@0 | 2119 | enum vcvt_Signedness { |
michael@0 | 2120 | toSigned = 1 << 16, |
michael@0 | 2121 | toUnsigned = 0 << 16, |
michael@0 | 2122 | fromSigned = 1 << 7, |
michael@0 | 2123 | fromUnsigned = 0 << 7 |
michael@0 | 2124 | }; |
michael@0 | 2125 | |
michael@0 | 2126 | // our encoding actually allows just the src and the dest (and their types) |
michael@0 | 2127 | // to uniquely specify the encoding that we are going to use. |
michael@0 | 2128 | BufferOffset |
michael@0 | 2129 | Assembler::as_vcvt(VFPRegister vd, VFPRegister vm, bool useFPSCR, |
michael@0 | 2130 | Condition c) |
michael@0 | 2131 | { |
michael@0 | 2132 | // Unlike other cases, the source and dest types cannot be the same |
michael@0 | 2133 | JS_ASSERT(!vd.equiv(vm)); |
michael@0 | 2134 | vfp_size sz = isDouble; |
michael@0 | 2135 | if (vd.isFloat() && vm.isFloat()) { |
michael@0 | 2136 | // Doing a float -> float conversion |
michael@0 | 2137 | if (vm.isSingle()) |
michael@0 | 2138 | sz = isSingle; |
michael@0 | 2139 | return writeVFPInst(sz, c | 0x02B700C0 | |
michael@0 | 2140 | VM(vm) | VD(vd)); |
michael@0 | 2141 | } |
michael@0 | 2142 | |
michael@0 | 2143 | // At least one of the registers should be a float. |
michael@0 | 2144 | vcvt_destFloatness destFloat; |
michael@0 | 2145 | vcvt_Signedness opSign; |
michael@0 | 2146 | vcvt_toZero doToZero = toFPSCR; |
michael@0 | 2147 | JS_ASSERT(vd.isFloat() || vm.isFloat()); |
michael@0 | 2148 | if (vd.isSingle() || vm.isSingle()) { |
michael@0 | 2149 | sz = isSingle; |
michael@0 | 2150 | } |
michael@0 | 2151 | if (vd.isFloat()) { |
michael@0 | 2152 | destFloat = toFloat; |
michael@0 | 2153 | opSign = (vm.isSInt()) ? fromSigned : fromUnsigned; |
michael@0 | 2154 | } else { |
michael@0 | 2155 | destFloat = toInteger; |
michael@0 | 2156 | opSign = (vd.isSInt()) ? toSigned : toUnsigned; |
michael@0 | 2157 | doToZero = useFPSCR ? toFPSCR : toZero; |
michael@0 | 2158 | } |
michael@0 | 2159 | return writeVFPInst(sz, c | 0x02B80040 | VD(vd) | VM(vm) | destFloat | opSign | doToZero); |
michael@0 | 2160 | } |
michael@0 | 2161 | |
michael@0 | 2162 | BufferOffset |
michael@0 | 2163 | Assembler::as_vcvtFixed(VFPRegister vd, bool isSigned, uint32_t fixedPoint, bool toFixed, Condition c) |
michael@0 | 2164 | { |
michael@0 | 2165 | JS_ASSERT(vd.isFloat()); |
michael@0 | 2166 | uint32_t sx = 0x1; |
michael@0 | 2167 | vfp_size sf = vd.isDouble() ? isDouble : isSingle; |
michael@0 | 2168 | int32_t imm5 = fixedPoint; |
michael@0 | 2169 | imm5 = (sx ? 32 : 16) - imm5; |
michael@0 | 2170 | JS_ASSERT(imm5 >= 0); |
michael@0 | 2171 | imm5 = imm5 >> 1 | (imm5 & 1) << 5; |
michael@0 | 2172 | return writeVFPInst(sf, 0x02BA0040 | VD(vd) | toFixed << 18 | sx << 7 | |
michael@0 | 2173 | (!isSigned) << 16 | imm5 | c); |
michael@0 | 2174 | } |
michael@0 | 2175 | |
michael@0 | 2176 | // xfer between VFP and memory |
michael@0 | 2177 | BufferOffset |
michael@0 | 2178 | Assembler::as_vdtr(LoadStore ls, VFPRegister vd, VFPAddr addr, |
michael@0 | 2179 | Condition c /* vfp doesn't have a wb option*/, |
michael@0 | 2180 | uint32_t *dest) |
michael@0 | 2181 | { |
michael@0 | 2182 | vfp_size sz = vd.isDouble() ? isDouble : isSingle; |
michael@0 | 2183 | return writeVFPInst(sz, ls | 0x01000000 | addr.encode() | VD(vd) | c, dest); |
michael@0 | 2184 | } |
michael@0 | 2185 | |
michael@0 | 2186 | // VFP's ldm/stm work differently from the standard ARM ones: |
michael@0 | 2187 | // you can only transfer a contiguous range of registers. |
michael@0 | 2188 | |
michael@0 | 2189 | BufferOffset |
michael@0 | 2190 | Assembler::as_vdtm(LoadStore st, Register rn, VFPRegister vd, int length, |
michael@0 | 2191 | /*also has update conditions*/Condition c) |
michael@0 | 2192 | { |
michael@0 | 2193 | JS_ASSERT(length <= 16 && length >= 0); |
michael@0 | 2194 | vfp_size sz = vd.isDouble() ? isDouble : isSingle; |
michael@0 | 2195 | |
michael@0 | 2196 | if (vd.isDouble()) |
michael@0 | 2197 | length *= 2; |
michael@0 | 2198 | |
michael@0 | 2199 | return writeVFPInst(sz, dtmLoadStore | RN(rn) | VD(vd) | |
michael@0 | 2200 | length | |
michael@0 | 2201 | dtmMode | dtmUpdate | dtmCond); |
michael@0 | 2202 | } |
michael@0 | 2203 | |
michael@0 | 2204 | BufferOffset |
michael@0 | 2205 | Assembler::as_vimm(VFPRegister vd, VFPImm imm, Condition c) |
michael@0 | 2206 | { |
michael@0 | 2207 | JS_ASSERT(imm.isValid()); |
michael@0 | 2208 | vfp_size sz = vd.isDouble() ? isDouble : isSingle; |
michael@0 | 2209 | return writeVFPInst(sz, c | imm.encode() | VD(vd) | 0x02B00000); |
michael@0 | 2210 | |
michael@0 | 2211 | } |
michael@0 | 2212 | BufferOffset |
michael@0 | 2213 | Assembler::as_vmrs(Register r, Condition c) |
michael@0 | 2214 | { |
michael@0 | 2215 | return writeInst(c | 0x0ef10a10 | RT(r)); |
michael@0 | 2216 | } |
michael@0 | 2217 | |
michael@0 | 2218 | BufferOffset |
michael@0 | 2219 | Assembler::as_vmsr(Register r, Condition c) |
michael@0 | 2220 | { |
michael@0 | 2221 | return writeInst(c | 0x0ee10a10 | RT(r)); |
michael@0 | 2222 | } |
michael@0 | 2223 | |
michael@0 | 2224 | bool |
michael@0 | 2225 | Assembler::nextLink(BufferOffset b, BufferOffset *next) |
michael@0 | 2226 | { |
michael@0 | 2227 | Instruction branch = *editSrc(b); |
michael@0 | 2228 | JS_ASSERT(branch.is<InstBranchImm>()); |
michael@0 | 2229 | |
michael@0 | 2230 | BOffImm destOff; |
michael@0 | 2231 | branch.as<InstBranchImm>()->extractImm(&destOff); |
michael@0 | 2232 | if (destOff.isInvalid()) |
michael@0 | 2233 | return false; |
michael@0 | 2234 | |
michael@0 | 2235 | // Propagate the next link back to the caller, by |
michael@0 | 2236 | // constructing a new BufferOffset into the space they |
michael@0 | 2237 | // provided. |
michael@0 | 2238 | new (next) BufferOffset(destOff.decode()); |
michael@0 | 2239 | return true; |
michael@0 | 2240 | } |
michael@0 | 2241 | |
michael@0 | 2242 | void |
michael@0 | 2243 | Assembler::bind(Label *label, BufferOffset boff) |
michael@0 | 2244 | { |
michael@0 | 2245 | if (label->used()) { |
michael@0 | 2246 | bool more; |
michael@0 | 2247 |         // If our caller didn't give us an explicit target to bind to, |
michael@0 | 2248 |         // then we want to bind to the location of the next instruction. |
michael@0 | 2249 | BufferOffset dest = boff.assigned() ? boff : nextOffset(); |
michael@0 | 2250 | BufferOffset b(label); |
michael@0 | 2251 | do { |
michael@0 | 2252 | BufferOffset next; |
michael@0 | 2253 | more = nextLink(b, &next); |
michael@0 | 2254 | Instruction branch = *editSrc(b); |
michael@0 | 2255 | Condition c; |
michael@0 | 2256 | branch.extractCond(&c); |
michael@0 | 2257 | if (branch.is<InstBImm>()) |
michael@0 | 2258 | as_b(dest.diffB<BOffImm>(b), c, b); |
michael@0 | 2259 | else if (branch.is<InstBLImm>()) |
michael@0 | 2260 | as_bl(dest.diffB<BOffImm>(b), c, b); |
michael@0 | 2261 | else |
michael@0 | 2262 | MOZ_ASSUME_UNREACHABLE("crazy fixup!"); |
michael@0 | 2263 | b = next; |
michael@0 | 2264 | } while (more); |
michael@0 | 2265 | } |
michael@0 | 2266 | label->bind(nextOffset().getOffset()); |
michael@0 | 2267 | } |
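// A standalone sketch of the use-chain pattern that as_b(Label*), bind() and retarget()
// implement: until a label is bound, every branch to it stores the buffer offset of the
// previous branch to the same label in its own immediate field, forming a singly linked
// list with no side allocations. bind() then walks that list and patches each slot with
// the real target. The vector-of-words buffer below stands in for the real ARMBuffer.
#include <cstdint>
#include <vector>

static const int32_t kEndOfChain = -1;

// Emit a placeholder "branch" that links itself into the label's chain.
static int32_t
emitBranchTo(std::vector<int32_t> &buf, int32_t &labelHead)
{
    int32_t here = int32_t(buf.size());
    buf.push_back(labelHead);   // remember the previous use in the instruction slot
    labelHead = here;           // this use becomes the new head of the chain
    return here;
}

// Bind the label to `target`: walk the chain and patch every linked slot.
static void
bindLabel(std::vector<int32_t> &buf, int32_t &labelHead, int32_t target)
{
    int32_t use = labelHead;
    while (use != kEndOfChain) {
        int32_t next = buf[use];    // follow the link before overwriting it
        buf[use] = target;          // in the real assembler: rewrite the branch immediate
        use = next;
    }
    labelHead = kEndOfChain;
}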
michael@0 | 2268 | |
michael@0 | 2269 | void |
michael@0 | 2270 | Assembler::bind(RepatchLabel *label) |
michael@0 | 2271 | { |
michael@0 | 2272 | BufferOffset dest = nextOffset(); |
michael@0 | 2273 | if (label->used()) { |
michael@0 | 2274 | // If the label has a use, then change this use to refer to |
michael@0 | 2275 | // the bound label; |
michael@0 | 2276 | BufferOffset branchOff(label->offset()); |
michael@0 | 2277 | // Since this was created with a RepatchLabel, the value written in the |
michael@0 | 2278 | // instruction stream is not branch shaped, it is PoolHintData shaped. |
michael@0 | 2279 | Instruction *branch = editSrc(branchOff); |
michael@0 | 2280 | PoolHintPun p; |
michael@0 | 2281 | p.raw = branch->encode(); |
michael@0 | 2282 | Condition cond; |
michael@0 | 2283 | if (p.phd.isValidPoolHint()) |
michael@0 | 2284 | cond = p.phd.getCond(); |
michael@0 | 2285 | else |
michael@0 | 2286 | branch->extractCond(&cond); |
michael@0 | 2287 | as_b(dest.diffB<BOffImm>(branchOff), cond, branchOff); |
michael@0 | 2288 | } |
michael@0 | 2289 | label->bind(dest.getOffset()); |
michael@0 | 2290 | } |
michael@0 | 2291 | |
michael@0 | 2292 | void |
michael@0 | 2293 | Assembler::retarget(Label *label, Label *target) |
michael@0 | 2294 | { |
michael@0 | 2295 | if (label->used()) { |
michael@0 | 2296 | if (target->bound()) { |
michael@0 | 2297 | bind(label, BufferOffset(target)); |
michael@0 | 2298 | } else if (target->used()) { |
michael@0 | 2299 | // The target is not bound but used. Prepend label's branch list |
michael@0 | 2300 | // onto target's. |
michael@0 | 2301 | BufferOffset labelBranchOffset(label); |
michael@0 | 2302 | BufferOffset next; |
michael@0 | 2303 | |
michael@0 | 2304 | // Find the head of the use chain for label. |
michael@0 | 2305 | while (nextLink(labelBranchOffset, &next)) |
michael@0 | 2306 | labelBranchOffset = next; |
michael@0 | 2307 | |
michael@0 | 2308 | // Then patch the head of label's use chain to the tail of |
michael@0 | 2309 | // target's use chain, prepending the entire use chain of target. |
michael@0 | 2310 | Instruction branch = *editSrc(labelBranchOffset); |
michael@0 | 2311 | Condition c; |
michael@0 | 2312 | branch.extractCond(&c); |
michael@0 | 2313 | int32_t prev = target->use(label->offset()); |
michael@0 | 2314 | if (branch.is<InstBImm>()) |
michael@0 | 2315 | as_b(BOffImm(prev), c, labelBranchOffset); |
michael@0 | 2316 | else if (branch.is<InstBLImm>()) |
michael@0 | 2317 | as_bl(BOffImm(prev), c, labelBranchOffset); |
michael@0 | 2318 | else |
michael@0 | 2319 | MOZ_ASSUME_UNREACHABLE("crazy fixup!"); |
michael@0 | 2320 | } else { |
michael@0 | 2321 | // The target is unbound and unused. We can just take the head of |
michael@0 | 2322 | // the list hanging off of label, and dump that into target. |
michael@0 | 2323 | DebugOnly<uint32_t> prev = target->use(label->offset()); |
michael@0 | 2324 | JS_ASSERT((int32_t)prev == Label::INVALID_OFFSET); |
michael@0 | 2325 | } |
michael@0 | 2326 | } |
michael@0 | 2327 | label->reset(); |
michael@0 | 2328 | |
michael@0 | 2329 | } |
michael@0 | 2330 | |
michael@0 | 2331 | |
michael@0 | 2332 | void dbg_break() {} |
michael@0 | 2333 | static int stopBKPT = -1; |
michael@0 | 2334 | void |
michael@0 | 2335 | Assembler::as_bkpt() |
michael@0 | 2336 | { |
michael@0 | 2337 | // This is a count of how many times a breakpoint instruction has been generated. |
michael@0 | 2338 | // It is embedded into the instruction for debugging purposes. gdb will print "bkpt xxx" |
michael@0 | 2339 |     // when you attempt to disassemble a breakpoint with the number xxx embedded into it. |
michael@0 | 2340 | // If this breakpoint is being hit, then you can run (in gdb) |
michael@0 | 2341 | // >b dbg_break |
michael@0 | 2342 | // >b main |
michael@0 | 2343 | // >commands |
michael@0 | 2344 | // >set stopBKPT = xxx |
michael@0 | 2345 | // >c |
michael@0 | 2346 | // >end |
michael@0 | 2347 | |
michael@0 | 2348 |     // which will set a breakpoint on the function dbg_break above, |
michael@0 | 2349 |     // set a scripted breakpoint on main that will set the (otherwise unmodified) |
michael@0 | 2350 |     // stopBKPT value to the number of the breakpoint, so dbg_break will actually be called, |
michael@0 | 2351 |     // and finally, when you run the executable, execution will halt when that |
michael@0 | 2352 |     // breakpoint is generated. |
michael@0 | 2353 | static int hit = 0; |
michael@0 | 2354 | if (stopBKPT == hit) |
michael@0 | 2355 | dbg_break(); |
michael@0 | 2356 | writeInst(0xe1200070 | (hit & 0xf) | ((hit & 0xfff0)<<4)); |
michael@0 | 2357 | hit++; |
michael@0 | 2358 | } |
michael@0 | 2359 | |
michael@0 | 2360 | void |
michael@0 | 2361 | Assembler::dumpPool() |
michael@0 | 2362 | { |
michael@0 | 2363 | m_buffer.flushPool(); |
michael@0 | 2364 | } |
michael@0 | 2365 | |
michael@0 | 2366 | void |
michael@0 | 2367 | Assembler::flushBuffer() |
michael@0 | 2368 | { |
michael@0 | 2369 | m_buffer.flushPool(); |
michael@0 | 2370 | } |
michael@0 | 2371 | |
michael@0 | 2372 | void |
michael@0 | 2373 | Assembler::enterNoPool() |
michael@0 | 2374 | { |
michael@0 | 2375 | m_buffer.enterNoPool(); |
michael@0 | 2376 | } |
michael@0 | 2377 | |
michael@0 | 2378 | void |
michael@0 | 2379 | Assembler::leaveNoPool() |
michael@0 | 2380 | { |
michael@0 | 2381 | m_buffer.leaveNoPool(); |
michael@0 | 2382 | } |
michael@0 | 2383 | |
michael@0 | 2384 | ptrdiff_t |
michael@0 | 2385 | Assembler::getBranchOffset(const Instruction *i_) |
michael@0 | 2386 | { |
michael@0 | 2387 | if (!i_->is<InstBranchImm>()) |
michael@0 | 2388 | return 0; |
michael@0 | 2389 | |
michael@0 | 2390 | InstBranchImm *i = i_->as<InstBranchImm>(); |
michael@0 | 2391 | BOffImm dest; |
michael@0 | 2392 | i->extractImm(&dest); |
michael@0 | 2393 | return dest.decode(); |
michael@0 | 2394 | } |
michael@0 | 2395 | void |
michael@0 | 2396 | Assembler::retargetNearBranch(Instruction *i, int offset, bool final) |
michael@0 | 2397 | { |
michael@0 | 2398 | Assembler::Condition c; |
michael@0 | 2399 | i->extractCond(&c); |
michael@0 | 2400 | retargetNearBranch(i, offset, c, final); |
michael@0 | 2401 | } |
michael@0 | 2402 | |
michael@0 | 2403 | void |
michael@0 | 2404 | Assembler::retargetNearBranch(Instruction *i, int offset, Condition cond, bool final) |
michael@0 | 2405 | { |
michael@0 | 2406 | // Retargeting calls is totally unsupported! |
michael@0 | 2407 | JS_ASSERT_IF(i->is<InstBranchImm>(), i->is<InstBImm>() || i->is<InstBLImm>()); |
michael@0 | 2408 | if (i->is<InstBLImm>()) |
michael@0 | 2409 | new (i) InstBLImm(BOffImm(offset), cond); |
michael@0 | 2410 | else |
michael@0 | 2411 | new (i) InstBImm(BOffImm(offset), cond); |
michael@0 | 2412 | |
michael@0 | 2413 | // Flush the cache, since an instruction was overwritten |
michael@0 | 2414 | if (final) |
michael@0 | 2415 | AutoFlushICache::flush(uintptr_t(i), 4); |
michael@0 | 2416 | } |
michael@0 | 2417 | |
michael@0 | 2418 | void |
michael@0 | 2419 | Assembler::retargetFarBranch(Instruction *i, uint8_t **slot, uint8_t *dest, Condition cond) |
michael@0 | 2420 | { |
michael@0 | 2421 | int32_t offset = reinterpret_cast<uint8_t*>(slot) - reinterpret_cast<uint8_t*>(i); |
michael@0 | 2422 | if (!i->is<InstLDR>()) { |
michael@0 | 2423 | new (i) InstLDR(Offset, pc, DTRAddr(pc, DtrOffImm(offset - 8)), cond); |
michael@0 | 2424 | AutoFlushICache::flush(uintptr_t(i), 4); |
michael@0 | 2425 | } |
michael@0 | 2426 | *slot = dest; |
michael@0 | 2427 | |
michael@0 | 2428 | } |
michael@0 | 2429 | |
michael@0 | 2430 | struct PoolHeader : Instruction { |
michael@0 | 2431 | struct Header |
michael@0 | 2432 | { |
michael@0 | 2433 |         // The size should take into account the pool header. |
michael@0 | 2434 |         // The size is in units of Instruction (4 bytes), not bytes. |
michael@0 | 2435 | uint32_t size : 15; |
michael@0 | 2436 | bool isNatural : 1; |
michael@0 | 2437 | uint32_t ONES : 16; |
michael@0 | 2438 | |
michael@0 | 2439 | Header(int size_, bool isNatural_) |
michael@0 | 2440 | : size(size_), |
michael@0 | 2441 | isNatural(isNatural_), |
michael@0 | 2442 | ONES(0xffff) |
michael@0 | 2443 | { } |
michael@0 | 2444 | |
michael@0 | 2445 | Header(const Instruction *i) { |
michael@0 | 2446 | JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t)); |
michael@0 | 2447 | memcpy(this, i, sizeof(Header)); |
michael@0 | 2448 | JS_ASSERT(ONES == 0xffff); |
michael@0 | 2449 | } |
michael@0 | 2450 | |
michael@0 | 2451 | uint32_t raw() const { |
michael@0 | 2452 | JS_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t)); |
michael@0 | 2453 | uint32_t dest; |
michael@0 | 2454 | memcpy(&dest, this, sizeof(Header)); |
michael@0 | 2455 | return dest; |
michael@0 | 2456 | } |
michael@0 | 2457 | }; |
michael@0 | 2458 | |
michael@0 | 2459 | PoolHeader(int size_, bool isNatural_) |
michael@0 | 2460 | : Instruction(Header(size_, isNatural_).raw(), true) |
michael@0 | 2461 | { } |
michael@0 | 2462 | |
michael@0 | 2463 | uint32_t size() const { |
michael@0 | 2464 | Header tmp(this); |
michael@0 | 2465 | return tmp.size; |
michael@0 | 2466 | } |
michael@0 | 2467 | uint32_t isNatural() const { |
michael@0 | 2468 | Header tmp(this); |
michael@0 | 2469 | return tmp.isNatural; |
michael@0 | 2470 | } |
michael@0 | 2471 | static bool isTHIS(const Instruction &i) { |
michael@0 | 2472 | return (*i.raw() & 0xffff0000) == 0xffff0000; |
michael@0 | 2473 | } |
michael@0 | 2474 | static const PoolHeader *asTHIS(const Instruction &i) { |
michael@0 | 2475 | if (!isTHIS(i)) |
michael@0 | 2476 | return nullptr; |
michael@0 | 2477 | return static_cast<const PoolHeader*>(&i); |
michael@0 | 2478 | } |
michael@0 | 2479 | }; |
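// A standalone sketch of how a consumer can skip a pool using the header above: the top
// 16 bits of a PoolHeader word are all ones (an encoding no ordinary instruction uses
// here), and the low 15 bits give the pool size in 4-byte units, including the header.
// The raw word walker below is illustrative only.
#include <cstdint>

static const uint32_t *
skipIfPoolHeader(const uint32_t *inst)
{
    uint32_t word = *inst;
    if ((word & 0xffff0000) != 0xffff0000)   // cf. PoolHeader::isTHIS()
        return inst;                         // not a pool header; nothing to skip
    uint32_t sizeInWords = word & 0x7fff;    // cf. Header::size
    return inst + sizeInWords;               // first instruction after the pool
}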
michael@0 | 2480 | |
michael@0 | 2481 | |
michael@0 | 2482 | void |
michael@0 | 2483 | Assembler::writePoolHeader(uint8_t *start, Pool *p, bool isNatural) |
michael@0 | 2484 | { |
michael@0 | 2485 | STATIC_ASSERT(sizeof(PoolHeader) == 4); |
michael@0 | 2486 | uint8_t *pool = start+4; |
michael@0 | 2487 |     // Go through the usual rigmarole to get the size of the pool. |
michael@0 | 2488 | pool = p[0].addPoolSize(pool); |
michael@0 | 2489 | pool = p[1].addPoolSize(pool); |
michael@0 | 2490 | pool = p[1].other->addPoolSize(pool); |
michael@0 | 2491 | pool = p[0].other->addPoolSize(pool); |
michael@0 | 2492 | uint32_t size = pool - start; |
michael@0 | 2493 | JS_ASSERT((size & 3) == 0); |
michael@0 | 2494 | size = size >> 2; |
michael@0 | 2495 | JS_ASSERT(size < (1 << 15)); |
michael@0 | 2496 | PoolHeader header(size, isNatural); |
michael@0 | 2497 | *(PoolHeader*)start = header; |
michael@0 | 2498 | } |
michael@0 | 2499 | |
michael@0 | 2500 | |
michael@0 | 2501 | void |
michael@0 | 2502 | Assembler::writePoolFooter(uint8_t *start, Pool *p, bool isNatural) |
michael@0 | 2503 | { |
michael@0 | 2504 | return; |
michael@0 | 2505 | } |
michael@0 | 2506 | |
michael@0 | 2507 | // The size of an arbitrary 32-bit call in the instruction stream. |
michael@0 | 2508 | // On ARM this sequence is |pc = ldr pc - 4; imm32| given that we |
michael@0 | 2509 | // never reach the imm32. |
michael@0 | 2510 | uint32_t |
michael@0 | 2511 | Assembler::patchWrite_NearCallSize() |
michael@0 | 2512 | { |
michael@0 | 2513 | return sizeof(uint32_t); |
michael@0 | 2514 | } |
michael@0 | 2515 | void |
michael@0 | 2516 | Assembler::patchWrite_NearCall(CodeLocationLabel start, CodeLocationLabel toCall) |
michael@0 | 2517 | { |
michael@0 | 2518 | Instruction *inst = (Instruction *) start.raw(); |
michael@0 | 2519 | // Overwrite whatever instruction used to be here with a call. |
michael@0 | 2520 |     // Since the destination is in the same function, it will be within range of the |
michael@0 | 2521 |     // bl instruction's 24-bit (word-scaled) branch offset. |
michael@0 | 2522 | uint8_t *dest = toCall.raw(); |
michael@0 | 2523 | new (inst) InstBLImm(BOffImm(dest - (uint8_t*)inst) , Always); |
michael@0 | 2524 | // Ensure everyone sees the code that was just written into memory. |
michael@0 | 2525 | |
michael@0 | 2526 | AutoFlushICache::flush(uintptr_t(inst), 4); |
michael@0 | 2527 | |
michael@0 | 2528 | } |
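// A standalone sketch of the branch-offset arithmetic behind BOffImm above: an ARM b/bl
// immediate is the signed word distance from the pc (the instruction address plus 8) to
// the target, stored in 24 bits. Helper names here are illustrative only.
#include <cstdint>

static bool
encodeBranchImm24(uintptr_t instAddr, uintptr_t targetAddr, int32_t *imm24)
{
    intptr_t delta = intptr_t(targetAddr) - intptr_t(instAddr + 8);
    if (delta & 3)
        return false;                          // branch targets are word aligned
    intptr_t words = delta >> 2;
    if (words < -(1 << 23) || words >= (1 << 23))
        return false;                          // out of the signed 24-bit range
    *imm24 = int32_t(words) & 0x00ffffff;
    return true;
}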
michael@0 | 2529 | void |
michael@0 | 2530 | Assembler::patchDataWithValueCheck(CodeLocationLabel label, PatchedImmPtr newValue, |
michael@0 | 2531 | PatchedImmPtr expectedValue) |
michael@0 | 2532 | { |
michael@0 | 2533 | Instruction *ptr = (Instruction *) label.raw(); |
michael@0 | 2534 | InstructionIterator iter(ptr); |
michael@0 | 2535 | Register dest; |
michael@0 | 2536 | Assembler::RelocStyle rs; |
michael@0 | 2537 | DebugOnly<const uint32_t *> val = getPtr32Target(&iter, &dest, &rs); |
michael@0 | 2538 | JS_ASSERT((uint32_t)(const uint32_t *)val == uint32_t(expectedValue.value)); |
michael@0 | 2539 | reinterpret_cast<MacroAssemblerARM*>(dummy)->ma_movPatchable(Imm32(int32_t(newValue.value)), |
michael@0 | 2540 | dest, Always, rs, ptr); |
michael@0 | 2541 | // L_LDR won't cause any instructions to be updated. |
michael@0 | 2542 | if (rs != L_LDR) { |
michael@0 | 2543 | AutoFlushICache::flush(uintptr_t(ptr), 4); |
michael@0 | 2544 | AutoFlushICache::flush(uintptr_t(ptr->next()), 4); |
michael@0 | 2545 | } |
michael@0 | 2546 | } |
michael@0 | 2547 | |
michael@0 | 2548 | void |
michael@0 | 2549 | Assembler::patchDataWithValueCheck(CodeLocationLabel label, ImmPtr newValue, ImmPtr expectedValue) |
michael@0 | 2550 | { |
michael@0 | 2551 | patchDataWithValueCheck(label, PatchedImmPtr(newValue.value), PatchedImmPtr(expectedValue.value)); |
michael@0 | 2552 | } |
michael@0 | 2553 | |
michael@0 | 2554 | // This just stomps over memory with 32 bits of raw data. Its purpose is to
michael@0 | 2555 | // overwrite the call of JITed code with 32 bits worth of an offset. It is
michael@0 | 2556 | // only meant to function on code that has been invalidated, so it should
michael@0 | 2557 | // be totally safe. Since that instruction will never be executed again, an
michael@0 | 2558 | // ICache flush should not be necessary.
michael@0 | 2559 | void |
michael@0 | 2560 | Assembler::patchWrite_Imm32(CodeLocationLabel label, Imm32 imm) { |
michael@0 | 2561 | // Raw is going to be the return address. |
michael@0 | 2562 | uint32_t *raw = (uint32_t*)label.raw(); |
michael@0 | 2563 | // Overwrite the 4 bytes before the return address, which will |
michael@0 | 2564 | // end up being the call instruction. |
michael@0 | 2565 | *(raw-1) = imm.value; |
michael@0 | 2566 | } |
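// Sketch (not from the original source): a hypothetical use of the routine
// above. Given the return address of an invalidated call site (as a
// CodeLocationLabel), the word that previously held the call instruction is
// overwritten with a raw offset; returnAddress and invalidationOffset below
// are assumed names.
//
//   Assembler::patchWrite_Imm32(CodeLocationLabel(returnAddress), Imm32(invalidationOffset));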
michael@0 | 2567 | |
michael@0 | 2568 | |
michael@0 | 2569 | uint8_t * |
michael@0 | 2570 | Assembler::nextInstruction(uint8_t *inst_, uint32_t *count) |
michael@0 | 2571 | { |
michael@0 | 2572 | Instruction *inst = reinterpret_cast<Instruction*>(inst_); |
michael@0 | 2573 | if (count != nullptr) |
michael@0 | 2574 | *count += sizeof(Instruction); |
michael@0 | 2575 | return reinterpret_cast<uint8_t*>(inst->next()); |
michael@0 | 2576 | } |
michael@0 | 2577 | |
michael@0 | 2578 | static bool |
michael@0 | 2579 | InstIsGuard(Instruction *inst, const PoolHeader **ph) |
michael@0 | 2580 | { |
michael@0 | 2581 | Assembler::Condition c; |
michael@0 | 2582 | inst->extractCond(&c); |
michael@0 | 2583 | if (c != Assembler::Always) |
michael@0 | 2584 | return false; |
michael@0 | 2585 | if (!(inst->is<InstBXReg>() || inst->is<InstBImm>())) |
michael@0 | 2586 | return false; |
michael@0 | 2587 | // See if the next instruction is a pool header. |
michael@0 | 2588 | *ph = (inst+1)->as<const PoolHeader>(); |
michael@0 | 2589 | return *ph != nullptr; |
michael@0 | 2590 | } |
michael@0 | 2591 | |
michael@0 | 2592 | static bool |
michael@0 | 2593 | InstIsBNop(Instruction *inst) { |
michael@0 | 2594 | // In some special situations, it is necessary to insert a NOP into the
michael@0 | 2595 | // instruction stream that nobody knows about. Since nobody should know about
michael@0 | 2596 | // it, make sure it gets skipped when Instruction::next() is called.
michael@0 | 2597 | // This generates a very specific NOP, namely a branch to the next instruction.
michael@0 | 2598 | Assembler::Condition c; |
michael@0 | 2599 | inst->extractCond(&c); |
michael@0 | 2600 | if (c != Assembler::Always) |
michael@0 | 2601 | return false; |
michael@0 | 2602 | if (!inst->is<InstBImm>()) |
michael@0 | 2603 | return false; |
michael@0 | 2604 | InstBImm *b = inst->as<InstBImm>(); |
michael@0 | 2605 | BOffImm offset; |
michael@0 | 2606 | b->extractImm(&offset); |
michael@0 | 2607 | return offset.decode() == 4; |
michael@0 | 2608 | } |
michael@0 | 2609 | |
michael@0 | 2610 | static bool |
michael@0 | 2611 | InstIsArtificialGuard(Instruction *inst, const PoolHeader **ph) |
michael@0 | 2612 | { |
michael@0 | 2613 | if (!InstIsGuard(inst, ph)) |
michael@0 | 2614 | return false; |
michael@0 | 2615 | return !(*ph)->isNatural(); |
michael@0 | 2616 | } |
michael@0 | 2617 | |
michael@0 | 2618 | // Cases to be handled: |
michael@0 | 2619 | // 1) no pools or branches in sight => return this+1 |
michael@0 | 2620 | // 2) branch to next instruction => return this+2, because a nop needed to be inserted into the stream. |
michael@0 | 2621 | // 3) this+1 is an artificial guard for a pool => return first instruction after the pool |
michael@0 | 2622 | // 4) this+1 is a natural guard => return the branch |
michael@0 | 2623 | // 5) this is a branch, right before a pool => return first instruction after the pool |
michael@0 | 2624 | // in assembly form: |
michael@0 | 2625 | // 1) add r0, r0, r0 <= this |
michael@0 | 2626 | // add r1, r1, r1 <= returned value |
michael@0 | 2627 | // add r2, r2, r2 |
michael@0 | 2628 | // |
michael@0 | 2629 | // 2) add r0, r0, r0 <= this |
michael@0 | 2630 | // b foo |
michael@0 | 2631 | // foo: |
michael@0 | 2632 | // add r2, r2, r2 <= returned value |
michael@0 | 2633 | // |
michael@0 | 2634 | // 3) add r0, r0, r0 <= this |
michael@0 | 2635 | // b after_pool; |
michael@0 | 2636 | //      .word 0xffff0002  # bit 15 being 0 indicates that the branch was not requested by the assembler;
michael@0 | 2637 | //      0xdeadbeef        # the size of 2 in that header covers the pool header plus its 1 pool entry
michael@0 | 2638 | // add r4, r4, r4 <= returned value |
michael@0 | 2639 | // 4) add r0, r0, r0 <= this |
michael@0 | 2640 | // b after_pool <= returned value |
michael@0 | 2641 | // .word 0xffff8002 # bit 15 being 1 indicates that the branch was requested by the assembler |
michael@0 | 2642 | // 0xdeadbeef |
michael@0 | 2643 | // add r4, r4, r4 |
michael@0 | 2644 | // 5) b after_pool <= this |
michael@0 | 2645 | // .word 0xffff8002 # bit 15 has no bearing on the returned value |
michael@0 | 2646 | // 0xdeadbeef |
michael@0 | 2647 | // add r4, r4, r4 <= returned value |
michael@0 | 2648 | |
michael@0 | 2649 | Instruction * |
michael@0 | 2650 | Instruction::next() |
michael@0 | 2651 | { |
michael@0 | 2652 | Instruction *ret = this+1; |
michael@0 | 2653 | const PoolHeader *ph; |
michael@0 | 2654 | // If this is a guard, and the next instruction is a header, always work around the pool.
michael@0 | 2655 | // If it isn't a guard, then start looking ahead.
michael@0 | 2656 | if (InstIsGuard(this, &ph)) |
michael@0 | 2657 | return ret + ph->size(); |
michael@0 | 2658 | if (InstIsArtificialGuard(ret, &ph)) |
michael@0 | 2659 | return ret + 1 + ph->size(); |
michael@0 | 2660 | if (InstIsBNop(ret)) |
michael@0 | 2661 | return ret + 1; |
michael@0 | 2662 | return ret; |
michael@0 | 2663 | } |
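// Sketch (not from the original source): how the pool-skipping logic above is
// meant to be consumed. Walking raw Instruction pointers with plain pointer
// arithmetic would step into constant-pool data; use next() (or the
// InstructionIterator defined near the end of this file) instead, e.g.
//
//   for (Instruction *inst = first; inst < last; inst = inst->next()) {
//       // inst never points at a pool header or pool data here.
//   }
//
// first and last are assumed bounds supplied by the caller.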
michael@0 | 2664 | |
michael@0 | 2665 | void |
michael@0 | 2666 | Assembler::ToggleToJmp(CodeLocationLabel inst_) |
michael@0 | 2667 | { |
michael@0 | 2668 | uint32_t *ptr = (uint32_t *)inst_.raw(); |
michael@0 | 2669 | |
michael@0 | 2670 | DebugOnly<Instruction *> inst = (Instruction *)inst_.raw(); |
michael@0 | 2671 | JS_ASSERT(inst->is<InstCMP>()); |
michael@0 | 2672 | |
michael@0 | 2673 | // Zero bits 20-27, then set 24-27 to be correct for a branch. |
michael@0 | 2674 | // 20-23 will be part of the B's immediate, and should be 0.
michael@0 | 2675 | *ptr = (*ptr & ~(0xff << 20)) | (0xa0 << 20); |
michael@0 | 2676 | AutoFlushICache::flush(uintptr_t(ptr), 4); |
michael@0 | 2677 | } |
michael@0 | 2678 | |
michael@0 | 2679 | void |
michael@0 | 2680 | Assembler::ToggleToCmp(CodeLocationLabel inst_) |
michael@0 | 2681 | { |
michael@0 | 2682 | uint32_t *ptr = (uint32_t *)inst_.raw(); |
michael@0 | 2683 | |
michael@0 | 2684 | DebugOnly<Instruction *> inst = (Instruction *)inst_.raw(); |
michael@0 | 2685 | JS_ASSERT(inst->is<InstBImm>()); |
michael@0 | 2686 | |
michael@0 | 2687 | // Ensure that this masking operation doesn't affect the offset of the |
michael@0 | 2688 | // branch instruction when it gets toggled back. |
michael@0 | 2689 | JS_ASSERT((*ptr & (0xf << 20)) == 0); |
michael@0 | 2690 | |
michael@0 | 2691 | // Also make sure that the CMP is valid. Part of having a valid CMP is that
michael@0 | 2692 | // the bits describing the destination (which most ALU instructions use) are
michael@0 | 2693 | // all unset, so it looks like it is encoding r0.
michael@0 | 2694 | JS_ASSERT(toRD(*inst) == r0); |
michael@0 | 2695 | |
michael@0 | 2696 | // Zero out bits 20-27, then set them to be correct for a compare. |
michael@0 | 2697 | *ptr = (*ptr & ~(0xff << 20)) | (0x35 << 20); |
michael@0 | 2698 | |
michael@0 | 2699 | AutoFlushICache::flush(uintptr_t(ptr), 4); |
michael@0 | 2700 | } |
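// Sketch (not from the original source): why the two magic constants used by
// ToggleToJmp/ToggleToCmp round-trip cleanly. In the ARM encoding, bits 27-20
// identify the instruction: 0xa0 (0b10100000) selects a plain B (bits 27-25 =
// 101, link bit 24 = 0) whose top four offset bits are zero, while 0x35
// (0b00110101) selects a compare with an immediate operand (bit 25 = 1, opcode
// bits 24-21 = 1010 for CMP, S bit 20 = 1). Because ToggleToCmp asserts that
// bits 20-23 of the branch are zero, rewriting only bits 20-27 flips between
// the two forms without disturbing the branch offset or the CMP operands held
// in bits 0-19.
//
//   // Hypothetical predicates mirroring the masks used above:
//   bool looksLikeAlwaysB = ((*ptr >> 20) & 0xff) == 0xa0;
//   bool looksLikeCmpImm  = ((*ptr >> 20) & 0xff) == 0x35;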
michael@0 | 2701 | |
michael@0 | 2702 | void |
michael@0 | 2703 | Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled) |
michael@0 | 2704 | { |
michael@0 | 2705 | Instruction *inst = (Instruction *)inst_.raw(); |
michael@0 | 2706 | JS_ASSERT(inst->is<InstMovW>() || inst->is<InstLDR>()); |
michael@0 | 2707 | |
michael@0 | 2708 | if (inst->is<InstMovW>()) { |
michael@0 | 2709 | // If it looks like the start of a movw/movt sequence, then make sure we
michael@0 | 2710 | // have all of it (and advance the iterator past the full sequence) before
michael@0 | 2711 | // looking at the word that gets toggled.
michael@0 | 2712 | inst = inst->next(); |
michael@0 | 2713 | JS_ASSERT(inst->is<InstMovT>()); |
michael@0 | 2714 | } |
michael@0 | 2715 | |
michael@0 | 2716 | inst = inst->next(); |
michael@0 | 2717 | JS_ASSERT(inst->is<InstNOP>() || inst->is<InstBLXReg>()); |
michael@0 | 2718 | |
michael@0 | 2719 | if (enabled == inst->is<InstBLXReg>()) { |
michael@0 | 2720 | // Nothing to do. |
michael@0 | 2721 | return; |
michael@0 | 2722 | } |
michael@0 | 2723 | |
michael@0 | 2724 | if (enabled) |
michael@0 | 2725 | *inst = InstBLXReg(ScratchRegister, Always); |
michael@0 | 2726 | else |
michael@0 | 2727 | *inst = InstNOP(); |
michael@0 | 2728 | |
michael@0 | 2729 | AutoFlushICache::flush(uintptr_t(inst), 4); |
michael@0 | 2730 | } |
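// Sketch (not from the original source): the two call-site shapes ToggleCall
// accepts, as implied by the asserts above:
//
//   movw scratch, #lo16          ; or:  ldr scratch, [pc, #imm]
//   movt scratch, #hi16          ;      (no movt in the ldr form)
//   nop              <->  blx scratch   ; only this word is toggled
//
// Only the final instruction is rewritten, so enabling or disabling the call
// is a single 4-byte patch followed by a 4-byte icache flush.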
michael@0 | 2731 | |
michael@0 | 2732 | void Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst) |
michael@0 | 2733 | { |
michael@0 | 2734 | JS_ASSERT(inst->is<InstCMP>()); |
michael@0 | 2735 | InstCMP *cmp = inst->as<InstCMP>(); |
michael@0 | 2736 | |
michael@0 | 2737 | Register index; |
michael@0 | 2738 | cmp->extractOp1(&index); |
michael@0 | 2739 | |
michael@0 | 2740 | Operand2 op = cmp->extractOp2(); |
michael@0 | 2741 | JS_ASSERT(op.isImm8()); |
michael@0 | 2742 | |
michael@0 | 2743 | Imm8 imm8 = Imm8(heapSize); |
michael@0 | 2744 | JS_ASSERT(!imm8.invalid); |
michael@0 | 2745 | |
michael@0 | 2746 | *inst = InstALU(InvalidReg, index, imm8, op_cmp, SetCond, Always); |
michael@0 | 2747 | // NOTE: we don't flush the AutoFlushICache here! This function is currently only
michael@0 | 2748 | // called from within AsmJSModule::patchHeapAccesses, which does the flush for us. Don't call this!
michael@0 | 2749 | } |
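// Sketch (not from the original source): the constraint behind the
// JS_ASSERT(!imm8.invalid) above. An ARM data-processing immediate is an 8-bit
// value rotated right by an even amount, so heapSize must be representable in
// that form (power-of-two heap sizes are; an arbitrary byte count generally is
// not). A caller could pre-check the same condition before patching:
//
//   bool fits = !Imm8(heapSize).invalid;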
michael@0 | 2750 | |
michael@0 | 2751 | InstructionIterator::InstructionIterator(Instruction *i_) : i(i_) { |
michael@0 | 2752 | const PoolHeader *ph; |
michael@0 | 2753 | // If this starts at an artificial guard (a branch the assembler inserted purely
michael@0 | 2754 | // to skip a pool), step past the pool so iteration begins at a real instruction.
michael@0 | 2755 | if (InstIsArtificialGuard(i, &ph)) { |
michael@0 | 2756 | i = i->next(); |
michael@0 | 2757 | } |
michael@0 | 2758 | } |
michael@0 | 2759 | Assembler *Assembler::dummy = nullptr; |