js/src/jit/IonMacroAssembler.h

author       Michael Schloh von Bennewitz <michael@schloh.com>
date         Sat, 03 Jan 2015 20:18:00 +0100
branch       TOR_BUG_3246
changeset    7:129ffea94266
permissions  -rw-r--r--

Conditionally enable double-key logic based on private browsing mode or the
privacy.thirdparty.isolate preference, and implement it in
GetCookieStringCommon and FindCookie where it counts...
With some reservations about how to convince FindCookie callers to test the
condition and pass a nullptr when double-key logic is disabled.

michael@0 1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
michael@0 2 * vim: set ts=8 sts=4 et sw=4 tw=99:
michael@0 3 * This Source Code Form is subject to the terms of the Mozilla Public
michael@0 4 * License, v. 2.0. If a copy of the MPL was not distributed with this
michael@0 5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
michael@0 6
michael@0 7 #ifndef jit_IonMacroAssembler_h
michael@0 8 #define jit_IonMacroAssembler_h
michael@0 9
michael@0 10 #ifdef JS_ION
michael@0 11
michael@0 12 #include "jscompartment.h"
michael@0 13
michael@0 14 #if defined(JS_CODEGEN_X86)
michael@0 15 # include "jit/x86/MacroAssembler-x86.h"
michael@0 16 #elif defined(JS_CODEGEN_X64)
michael@0 17 # include "jit/x64/MacroAssembler-x64.h"
michael@0 18 #elif defined(JS_CODEGEN_ARM)
michael@0 19 # include "jit/arm/MacroAssembler-arm.h"
michael@0 20 #elif defined(JS_CODEGEN_MIPS)
michael@0 21 # include "jit/mips/MacroAssembler-mips.h"
michael@0 22 #else
michael@0 23 # error "Unknown architecture!"
michael@0 24 #endif
michael@0 25 #include "jit/IonInstrumentation.h"
michael@0 26 #include "jit/JitCompartment.h"
michael@0 27 #include "jit/VMFunctions.h"
michael@0 28 #include "vm/ProxyObject.h"
michael@0 29 #include "vm/Shape.h"
michael@0 30
michael@0 31 namespace js {
michael@0 32 namespace jit {
michael@0 33
michael@0 34 // The public entrypoint for emitting assembly. Note that a MacroAssembler can
michael@0 35 // use cx->lifoAlloc, so take care not to interleave masm use with other
michael@0 36 // lifoAlloc use if one will be destroyed before the other.
michael@0 37 class MacroAssembler : public MacroAssemblerSpecific
michael@0 38 {
michael@0 39 MacroAssembler *thisFromCtor() {
michael@0 40 return this;
michael@0 41 }
michael@0 42
michael@0 43 public:
michael@0 44 class AutoRooter : public AutoGCRooter
michael@0 45 {
michael@0 46 MacroAssembler *masm_;
michael@0 47
michael@0 48 public:
michael@0 49 AutoRooter(JSContext *cx, MacroAssembler *masm)
michael@0 50 : AutoGCRooter(cx, IONMASM),
michael@0 51 masm_(masm)
michael@0 52 { }
michael@0 53
michael@0 54 MacroAssembler *masm() const {
michael@0 55 return masm_;
michael@0 56 }
michael@0 57 };
michael@0 58
michael@0 59 /*
michael@0 60 * Base class for creating a branch.
michael@0 61 */
michael@0 62 class Branch
michael@0 63 {
michael@0 64 bool init_;
michael@0 65 Condition cond_;
michael@0 66 Label *jump_;
michael@0 67 Register reg_;
michael@0 68
michael@0 69 public:
michael@0 70 Branch()
michael@0 71 : init_(false),
michael@0 72 cond_(Equal),
michael@0 73 jump_(nullptr),
michael@0 74 reg_(Register::FromCode(0)) // Quell compiler warnings.
michael@0 75 { }
michael@0 76
michael@0 77 Branch(Condition cond, Register reg, Label *jump)
michael@0 78 : init_(true),
michael@0 79 cond_(cond),
michael@0 80 jump_(jump),
michael@0 81 reg_(reg)
michael@0 82 { }
michael@0 83
michael@0 84 bool isInitialized() const {
michael@0 85 return init_;
michael@0 86 }
michael@0 87
michael@0 88 Condition cond() const {
michael@0 89 return cond_;
michael@0 90 }
michael@0 91
michael@0 92 Label *jump() const {
michael@0 93 return jump_;
michael@0 94 }
michael@0 95
michael@0 96 Register reg() const {
michael@0 97 return reg_;
michael@0 98 }
michael@0 99
michael@0 100 void invertCondition() {
michael@0 101 cond_ = InvertCondition(cond_);
michael@0 102 }
michael@0 103
michael@0 104 void relink(Label *jump) {
michael@0 105 jump_ = jump;
michael@0 106 }
michael@0 107
michael@0 108 virtual void emit(MacroAssembler &masm) = 0;
michael@0 109 };
michael@0 110
michael@0 111 /*
michael@0 112 * Creates a branch based on a specific types::Type.
michael@0 113 * Note: emits number test (int/double) for types::Type::DoubleType()
michael@0 114 */
michael@0 115 class BranchType : public Branch
michael@0 116 {
michael@0 117 types::Type type_;
michael@0 118
michael@0 119 public:
michael@0 120 BranchType()
michael@0 121 : Branch(),
michael@0 122 type_(types::Type::UnknownType())
michael@0 123 { }
michael@0 124
michael@0 125 BranchType(Condition cond, Register reg, types::Type type, Label *jump)
michael@0 126 : Branch(cond, reg, jump),
michael@0 127 type_(type)
michael@0 128 { }
michael@0 129
michael@0 130 void emit(MacroAssembler &masm) {
michael@0 131 JS_ASSERT(isInitialized());
michael@0 132 MIRType mirType = MIRType_None;
michael@0 133
michael@0 134 if (type_.isPrimitive()) {
michael@0 135 if (type_.isMagicArguments())
michael@0 136 mirType = MIRType_MagicOptimizedArguments;
michael@0 137 else
michael@0 138 mirType = MIRTypeFromValueType(type_.primitive());
michael@0 139 } else if (type_.isAnyObject()) {
michael@0 140 mirType = MIRType_Object;
michael@0 141 } else {
michael@0 142 MOZ_ASSUME_UNREACHABLE("Unknown conversion to mirtype");
michael@0 143 }
michael@0 144
michael@0 145 if (mirType == MIRType_Double)
michael@0 146 masm.branchTestNumber(cond(), reg(), jump());
michael@0 147 else
michael@0 148 masm.branchTestMIRType(cond(), reg(), mirType, jump());
michael@0 149 }
michael@0 150
michael@0 151 };
michael@0 152
michael@0 153 /*
michael@0 154 * Creates a branch based on a GCPtr.
michael@0 155 */
michael@0 156 class BranchGCPtr : public Branch
michael@0 157 {
michael@0 158 ImmGCPtr ptr_;
michael@0 159
michael@0 160 public:
michael@0 161 BranchGCPtr()
michael@0 162 : Branch(),
michael@0 163 ptr_(ImmGCPtr(nullptr))
michael@0 164 { }
michael@0 165
michael@0 166 BranchGCPtr(Condition cond, Register reg, ImmGCPtr ptr, Label *jump)
michael@0 167 : Branch(cond, reg, jump),
michael@0 168 ptr_(ptr)
michael@0 169 { }
michael@0 170
michael@0 171 void emit(MacroAssembler &masm) {
michael@0 172 JS_ASSERT(isInitialized());
michael@0 173 masm.branchPtr(cond(), reg(), ptr_, jump());
michael@0 174 }
michael@0 175 };
michael@0 176
michael@0 177 mozilla::Maybe<AutoRooter> autoRooter_;
michael@0 178 mozilla::Maybe<IonContext> ionContext_;
michael@0 179 mozilla::Maybe<AutoIonContextAlloc> alloc_;
michael@0 180 bool enoughMemory_;
michael@0 181 bool embedsNurseryPointers_;
michael@0 182
michael@0 183 // SPS instrumentation, only used for Ion caches.
michael@0 184 mozilla::Maybe<IonInstrumentation> spsInstrumentation_;
michael@0 185 jsbytecode *spsPc_;
michael@0 186
michael@0 187 private:
michael@0 188 // This field is used to manage profiling instrumentation output. If
michael@0 189 // provided and enabled, then instrumentation will be emitted around call
michael@0 190 // sites. The IonInstrumentation instance is hosted inside of
michael@0 191 // CodeGeneratorShared and is the manager of when instrumentation is
michael@0 192 // actually emitted or not. If nullptr, then no instrumentation is emitted.
michael@0 193 IonInstrumentation *sps_;
michael@0 194
michael@0 195 // Labels for handling exceptions and failures.
michael@0 196 NonAssertingLabel sequentialFailureLabel_;
michael@0 197 NonAssertingLabel parallelFailureLabel_;
michael@0 198
michael@0 199 public:
michael@0 200 // If instrumentation should be emitted, then the sps parameter should be
michael@0 201 // provided, but otherwise it can be safely omitted to prevent all
michael@0 202 // instrumentation from being emitted.
michael@0 203 MacroAssembler()
michael@0 204 : enoughMemory_(true),
michael@0 205 embedsNurseryPointers_(false),
michael@0 206 sps_(nullptr)
michael@0 207 {
michael@0 208 IonContext *icx = GetIonContext();
michael@0 209 JSContext *cx = icx->cx;
michael@0 210 if (cx)
michael@0 211 constructRoot(cx);
michael@0 212
michael@0 213 if (!icx->temp) {
michael@0 214 JS_ASSERT(cx);
michael@0 215 alloc_.construct(cx);
michael@0 216 }
michael@0 217
michael@0 218 moveResolver_.setAllocator(*icx->temp);
michael@0 219 #ifdef JS_CODEGEN_ARM
michael@0 220 initWithAllocator();
michael@0 221 m_buffer.id = icx->getNextAssemblerId();
michael@0 222 #endif
michael@0 223 }
michael@0 224
michael@0 225 // This constructor should only be used when there is no IonContext active
michael@0 226 // (for example, Trampoline-$(ARCH).cpp and IonCaches.cpp).
michael@0 227 MacroAssembler(JSContext *cx, IonScript *ion = nullptr,
michael@0 228 JSScript *script = nullptr, jsbytecode *pc = nullptr)
michael@0 229 : enoughMemory_(true),
michael@0 230 embedsNurseryPointers_(false),
michael@0 231 sps_(nullptr)
michael@0 232 {
michael@0 233 constructRoot(cx);
michael@0 234 ionContext_.construct(cx, (js::jit::TempAllocator *)nullptr);
michael@0 235 alloc_.construct(cx);
michael@0 236 moveResolver_.setAllocator(*ionContext_.ref().temp);
michael@0 237 #ifdef JS_CODEGEN_ARM
michael@0 238 initWithAllocator();
michael@0 239 m_buffer.id = GetIonContext()->getNextAssemblerId();
michael@0 240 #endif
michael@0 241 if (ion) {
michael@0 242 setFramePushed(ion->frameSize());
michael@0 243 if (pc && cx->runtime()->spsProfiler.enabled()) {
michael@0 244 // We have to update the SPS pc when this IC stub calls into
michael@0 245 // the VM.
michael@0 246 spsPc_ = pc;
michael@0 247 spsInstrumentation_.construct(&cx->runtime()->spsProfiler, &spsPc_);
michael@0 248 sps_ = spsInstrumentation_.addr();
michael@0 249 sps_->setPushed(script);
michael@0 250 }
michael@0 251 }
michael@0 252 }
michael@0 253
michael@0 254 // asm.js compilation handles its own IonContext-pushing
michael@0 255 struct AsmJSToken {};
michael@0 256 MacroAssembler(AsmJSToken)
michael@0 257 : enoughMemory_(true),
michael@0 258 embedsNurseryPointers_(false),
michael@0 259 sps_(nullptr)
michael@0 260 {
michael@0 261 #ifdef JS_CODEGEN_ARM
michael@0 262 initWithAllocator();
michael@0 263 m_buffer.id = 0;
michael@0 264 #endif
michael@0 265 }
michael@0 266
michael@0 267 void setInstrumentation(IonInstrumentation *sps) {
michael@0 268 sps_ = sps;
michael@0 269 }
michael@0 270
michael@0 271 void resetForNewCodeGenerator(TempAllocator &alloc) {
michael@0 272 setFramePushed(0);
michael@0 273 moveResolver_.clearTempObjectPool();
michael@0 274 moveResolver_.setAllocator(alloc);
michael@0 275 }
michael@0 276
michael@0 277 void constructRoot(JSContext *cx) {
michael@0 278 autoRooter_.construct(cx, this);
michael@0 279 }
michael@0 280
michael@0 281 MoveResolver &moveResolver() {
michael@0 282 return moveResolver_;
michael@0 283 }
michael@0 284
michael@0 285 size_t instructionsSize() const {
michael@0 286 return size();
michael@0 287 }
michael@0 288
michael@0 289 void propagateOOM(bool success) {
michael@0 290 enoughMemory_ &= success;
michael@0 291 }
michael@0 292 bool oom() const {
michael@0 293 return !enoughMemory_ || MacroAssemblerSpecific::oom();
michael@0 294 }
michael@0 295
michael@0 296 bool embedsNurseryPointers() const {
michael@0 297 return embedsNurseryPointers_;
michael@0 298 }
michael@0 299
michael@0 300 // Emits a test of a value against all types in a TypeSet. A scratch
michael@0 301 // register is required.
michael@0 302 template <typename Source, typename TypeSet>
michael@0 303 void guardTypeSet(const Source &address, const TypeSet *types, Register scratch, Label *miss);
michael@0 304 template <typename TypeSet>
michael@0 305 void guardObjectType(Register obj, const TypeSet *types, Register scratch, Label *miss);
michael@0 306 template <typename Source>
michael@0 307 void guardType(const Source &address, types::Type type, Register scratch, Label *miss);
michael@0 308
michael@0 309 void loadObjShape(Register objReg, Register dest) {
michael@0 310 loadPtr(Address(objReg, JSObject::offsetOfShape()), dest);
michael@0 311 }
michael@0 312 void loadBaseShape(Register objReg, Register dest) {
michael@0 313 loadPtr(Address(objReg, JSObject::offsetOfShape()), dest);
michael@0 314
michael@0 315 loadPtr(Address(dest, Shape::offsetOfBase()), dest);
michael@0 316 }
michael@0 317 void loadObjClass(Register objReg, Register dest) {
michael@0 318 loadPtr(Address(objReg, JSObject::offsetOfType()), dest);
michael@0 319 loadPtr(Address(dest, types::TypeObject::offsetOfClasp()), dest);
michael@0 320 }
michael@0 321 void branchTestObjClass(Condition cond, Register obj, Register scratch, const js::Class *clasp,
michael@0 322 Label *label) {
michael@0 323 loadPtr(Address(obj, JSObject::offsetOfType()), scratch);
michael@0 324 branchPtr(cond, Address(scratch, types::TypeObject::offsetOfClasp()), ImmPtr(clasp), label);
michael@0 325 }
michael@0 326 void branchTestObjShape(Condition cond, Register obj, const Shape *shape, Label *label) {
michael@0 327 branchPtr(cond, Address(obj, JSObject::offsetOfShape()), ImmGCPtr(shape), label);
michael@0 328 }
michael@0 329 void branchTestObjShape(Condition cond, Register obj, Register shape, Label *label) {
michael@0 330 branchPtr(cond, Address(obj, JSObject::offsetOfShape()), shape, label);
michael@0 331 }
michael@0 332 void branchTestProxyHandlerFamily(Condition cond, Register proxy, Register scratch,
michael@0 333 const void *handlerp, Label *label) {
michael@0 334 Address handlerAddr(proxy, ProxyObject::offsetOfHandler());
michael@0 335 loadPrivate(handlerAddr, scratch);
michael@0 336 Address familyAddr(scratch, BaseProxyHandler::offsetOfFamily());
michael@0 337 branchPtr(cond, familyAddr, ImmPtr(handlerp), label);
michael@0 338 }
michael@0 339
michael@0 340 template <typename Value>
michael@0 341 void branchTestMIRType(Condition cond, const Value &val, MIRType type, Label *label) {
michael@0 342 switch (type) {
michael@0 343 case MIRType_Null: return branchTestNull(cond, val, label);
michael@0 344 case MIRType_Undefined: return branchTestUndefined(cond, val, label);
michael@0 345 case MIRType_Boolean: return branchTestBoolean(cond, val, label);
michael@0 346 case MIRType_Int32: return branchTestInt32(cond, val, label);
michael@0 347 case MIRType_String: return branchTestString(cond, val, label);
michael@0 348 case MIRType_Object: return branchTestObject(cond, val, label);
michael@0 349 case MIRType_Double: return branchTestDouble(cond, val, label);
michael@0 350 case MIRType_MagicOptimizedArguments: // Fall through.
michael@0 351 case MIRType_MagicIsConstructing:
michael@0 352 case MIRType_MagicHole: return branchTestMagic(cond, val, label);
michael@0 353 default:
michael@0 354 MOZ_ASSUME_UNREACHABLE("Bad MIRType");
michael@0 355 }
michael@0 356 }
michael@0 357
michael@0 358 // Branches to |label| if |reg| is false. |reg| should be a C++ bool.
michael@0 359 void branchIfFalseBool(Register reg, Label *label) {
michael@0 360 // Note that C++ bool is only 1 byte, so ignore the higher-order bits.
michael@0 361 branchTest32(Assembler::Zero, reg, Imm32(0xFF), label);
michael@0 362 }
michael@0 363
michael@0 364 // Branches to |label| if |reg| is true. |reg| should be a C++ bool.
michael@0 365 void branchIfTrueBool(Register reg, Label *label) {
michael@0 366 // Note that C++ bool is only 1 byte, so ignore the higher-order bits.
michael@0 367 branchTest32(Assembler::NonZero, reg, Imm32(0xFF), label);
michael@0 368 }
michael@0 369
michael@0 370 void loadObjPrivate(Register obj, uint32_t nfixed, Register dest) {
michael@0 371 loadPtr(Address(obj, JSObject::getPrivateDataOffset(nfixed)), dest);
michael@0 372 }
michael@0 373
michael@0 374 void loadObjProto(Register obj, Register dest) {
michael@0 375 loadPtr(Address(obj, JSObject::offsetOfType()), dest);
michael@0 376 loadPtr(Address(dest, types::TypeObject::offsetOfProto()), dest);
michael@0 377 }
michael@0 378
michael@0 379 void loadStringLength(Register str, Register dest) {
michael@0 380 loadPtr(Address(str, JSString::offsetOfLengthAndFlags()), dest);
michael@0 381 rshiftPtr(Imm32(JSString::LENGTH_SHIFT), dest);
michael@0 382 }
michael@0 383
michael@0 384 void loadSliceBounds(Register worker, Register dest) {
michael@0 385 loadPtr(Address(worker, ThreadPoolWorker::offsetOfSliceBounds()), dest);
michael@0 386 }
michael@0 387
michael@0 388 void loadJSContext(Register dest) {
michael@0 389 loadPtr(AbsoluteAddress(GetIonContext()->runtime->addressOfJSContext()), dest);
michael@0 390 }
michael@0 391 void loadJitActivation(Register dest) {
michael@0 392 loadPtr(AbsoluteAddress(GetIonContext()->runtime->addressOfActivation()), dest);
michael@0 393 }
michael@0 394
michael@0 395 template<typename T>
michael@0 396 void loadTypedOrValue(const T &src, TypedOrValueRegister dest) {
michael@0 397 if (dest.hasValue())
michael@0 398 loadValue(src, dest.valueReg());
michael@0 399 else
michael@0 400 loadUnboxedValue(src, dest.type(), dest.typedReg());
michael@0 401 }
michael@0 402
michael@0 403 template<typename T>
michael@0 404 void loadElementTypedOrValue(const T &src, TypedOrValueRegister dest, bool holeCheck,
michael@0 405 Label *hole) {
michael@0 406 if (dest.hasValue()) {
michael@0 407 loadValue(src, dest.valueReg());
michael@0 408 if (holeCheck)
michael@0 409 branchTestMagic(Assembler::Equal, dest.valueReg(), hole);
michael@0 410 } else {
michael@0 411 if (holeCheck)
michael@0 412 branchTestMagic(Assembler::Equal, src, hole);
michael@0 413 loadUnboxedValue(src, dest.type(), dest.typedReg());
michael@0 414 }
michael@0 415 }
michael@0 416
michael@0 417 template <typename T>
michael@0 418 void storeTypedOrValue(TypedOrValueRegister src, const T &dest) {
michael@0 419 if (src.hasValue()) {
michael@0 420 storeValue(src.valueReg(), dest);
michael@0 421 } else if (IsFloatingPointType(src.type())) {
michael@0 422 FloatRegister reg = src.typedReg().fpu();
michael@0 423 if (src.type() == MIRType_Float32) {
michael@0 424 convertFloat32ToDouble(reg, ScratchFloatReg);
michael@0 425 reg = ScratchFloatReg;
michael@0 426 }
michael@0 427 storeDouble(reg, dest);
michael@0 428 } else {
michael@0 429 storeValue(ValueTypeFromMIRType(src.type()), src.typedReg().gpr(), dest);
michael@0 430 }
michael@0 431 }
michael@0 432
michael@0 433 template <typename T>
michael@0 434 void storeConstantOrRegister(ConstantOrRegister src, const T &dest) {
michael@0 435 if (src.constant())
michael@0 436 storeValue(src.value(), dest);
michael@0 437 else
michael@0 438 storeTypedOrValue(src.reg(), dest);
michael@0 439 }
michael@0 440
michael@0 441 void storeCallResult(Register reg) {
michael@0 442 if (reg != ReturnReg)
michael@0 443 mov(ReturnReg, reg);
michael@0 444 }
michael@0 445
michael@0 446 void storeCallFloatResult(const FloatRegister &reg) {
michael@0 447 if (reg != ReturnFloatReg)
michael@0 448 moveDouble(ReturnFloatReg, reg);
michael@0 449 }
michael@0 450
michael@0 451 void storeCallResultValue(AnyRegister dest) {
michael@0 452 #if defined(JS_NUNBOX32)
michael@0 453 unboxValue(ValueOperand(JSReturnReg_Type, JSReturnReg_Data), dest);
michael@0 454 #elif defined(JS_PUNBOX64)
michael@0 455 unboxValue(ValueOperand(JSReturnReg), dest);
michael@0 456 #else
michael@0 457 #error "Bad architecture"
michael@0 458 #endif
michael@0 459 }
michael@0 460
michael@0 461 void storeCallResultValue(ValueOperand dest) {
michael@0 462 #if defined(JS_NUNBOX32)
michael@0 463 // reshuffle the return registers used for a call result to store into
michael@0 464 // dest, using ReturnReg as a scratch register if necessary. This must
michael@0 465 // only be called after returning from a call, at a point when the
michael@0 466 // return register is not live. XXX would be better to allow wrappers
michael@0 467 // to store the return value to different places.
michael@0 468 if (dest.typeReg() == JSReturnReg_Data) {
michael@0 469 if (dest.payloadReg() == JSReturnReg_Type) {
michael@0 470 // swap the two registers.
michael@0 471 mov(JSReturnReg_Type, ReturnReg);
michael@0 472 mov(JSReturnReg_Data, JSReturnReg_Type);
michael@0 473 mov(ReturnReg, JSReturnReg_Data);
michael@0 474 } else {
michael@0 475 mov(JSReturnReg_Data, dest.payloadReg());
michael@0 476 mov(JSReturnReg_Type, dest.typeReg());
michael@0 477 }
michael@0 478 } else {
michael@0 479 mov(JSReturnReg_Type, dest.typeReg());
michael@0 480 mov(JSReturnReg_Data, dest.payloadReg());
michael@0 481 }
michael@0 482 #elif defined(JS_PUNBOX64)
michael@0 483 if (dest.valueReg() != JSReturnReg)
michael@0 484 movq(JSReturnReg, dest.valueReg());
michael@0 485 #else
michael@0 486 #error "Bad architecture"
michael@0 487 #endif
michael@0 488 }
michael@0 489
michael@0 490 void storeCallResultValue(TypedOrValueRegister dest) {
michael@0 491 if (dest.hasValue())
michael@0 492 storeCallResultValue(dest.valueReg());
michael@0 493 else
michael@0 494 storeCallResultValue(dest.typedReg());
michael@0 495 }
michael@0 496
michael@0 497 template <typename T>
michael@0 498 Register extractString(const T &source, Register scratch) {
michael@0 499 return extractObject(source, scratch);
michael@0 500 }
michael@0 501
michael@0 502 void PushRegsInMask(RegisterSet set);
michael@0 503 void PushRegsInMask(GeneralRegisterSet set) {
michael@0 504 PushRegsInMask(RegisterSet(set, FloatRegisterSet()));
michael@0 505 }
michael@0 506 void PopRegsInMask(RegisterSet set) {
michael@0 507 PopRegsInMaskIgnore(set, RegisterSet());
michael@0 508 }
michael@0 509 void PopRegsInMask(GeneralRegisterSet set) {
michael@0 510 PopRegsInMask(RegisterSet(set, FloatRegisterSet()));
michael@0 511 }
michael@0 512 void PopRegsInMaskIgnore(RegisterSet set, RegisterSet ignore);
michael@0 513
michael@0 514 void branchIfFunctionHasNoScript(Register fun, Label *label) {
michael@0 515 // 16-bit loads are slow and unaligned 32-bit loads may be too so
michael@0 516 // perform an aligned 32-bit load and adjust the bitmask accordingly.
michael@0 517 JS_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
michael@0 518 JS_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
michael@0 519 JS_STATIC_ASSERT(IS_LITTLE_ENDIAN);
michael@0 520 Address address(fun, JSFunction::offsetOfNargs());
michael@0 521 uint32_t bit = JSFunction::INTERPRETED << 16;
michael@0 522 branchTest32(Assembler::Zero, address, Imm32(bit), label);
michael@0 523 }
michael@0 524 void branchIfInterpreted(Register fun, Label *label) {
michael@0 525 // 16-bit loads are slow and unaligned 32-bit loads may be too so
michael@0 526 // perform an aligned 32-bit load and adjust the bitmask accordingly.
michael@0 527 JS_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
michael@0 528 JS_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
michael@0 529 JS_STATIC_ASSERT(IS_LITTLE_ENDIAN);
michael@0 530 Address address(fun, JSFunction::offsetOfNargs());
michael@0 531 uint32_t bit = JSFunction::INTERPRETED << 16;
michael@0 532 branchTest32(Assembler::NonZero, address, Imm32(bit), label);
michael@0 533 }
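// Both branches above rely on the layout the asserts encode: nargs and flags
// are adjacent 16-bit fields, so on a little-endian target the aligned 32-bit
// load at offsetOfNargs() yields nargs in bits 0..15 and flags in bits 16..31.
// Testing (JSFunction::INTERPRETED << 16) in that word is therefore equivalent
// to testing INTERPRETED in the flags field, without a slow 16-bit load.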
michael@0 534
michael@0 535 void branchIfNotInterpretedConstructor(Register fun, Register scratch, Label *label);
michael@0 536
michael@0 537 using MacroAssemblerSpecific::Push;
michael@0 538 using MacroAssemblerSpecific::Pop;
michael@0 539
michael@0 540 void Push(jsid id, Register scratchReg) {
michael@0 541 if (JSID_IS_GCTHING(id)) {
michael@0 542 // If we're pushing a gcthing, then we can't just push the tagged jsid
michael@0 543 // value since the GC won't have any idea that the push instruction
michael@0 544 // carries a reference to a gcthing. Need to unpack the pointer,
michael@0 545 // push it using ImmGCPtr, and then rematerialize the id at runtime.
michael@0 546
michael@0 547 // double-checking this here to ensure we don't lose sync
michael@0 548 // with implementation of JSID_IS_GCTHING.
michael@0 549 if (JSID_IS_OBJECT(id)) {
michael@0 550 JSObject *obj = JSID_TO_OBJECT(id);
michael@0 551 movePtr(ImmGCPtr(obj), scratchReg);
michael@0 552 JS_ASSERT(((size_t)obj & JSID_TYPE_MASK) == 0);
michael@0 553 orPtr(Imm32(JSID_TYPE_OBJECT), scratchReg);
michael@0 554 Push(scratchReg);
michael@0 555 } else {
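// JSID_TYPE_STRING is 0 and the string pointer has no type bits set (see the
// asserts below), so the bare pointer is already the correctly tagged jsid
// and can be pushed directly as an ImmGCPtr.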
michael@0 556 JSString *str = JSID_TO_STRING(id);
michael@0 557 JS_ASSERT(((size_t)str & JSID_TYPE_MASK) == 0);
michael@0 558 JS_ASSERT(JSID_TYPE_STRING == 0x0);
michael@0 559 Push(ImmGCPtr(str));
michael@0 560 }
michael@0 561 } else {
michael@0 562 Push(ImmWord(JSID_BITS(id)));
michael@0 563 }
michael@0 564 }
michael@0 565
michael@0 566 void Push(TypedOrValueRegister v) {
michael@0 567 if (v.hasValue()) {
michael@0 568 Push(v.valueReg());
michael@0 569 } else if (IsFloatingPointType(v.type())) {
michael@0 570 FloatRegister reg = v.typedReg().fpu();
michael@0 571 if (v.type() == MIRType_Float32) {
michael@0 572 convertFloat32ToDouble(reg, ScratchFloatReg);
michael@0 573 reg = ScratchFloatReg;
michael@0 574 }
michael@0 575 Push(reg);
michael@0 576 } else {
michael@0 577 Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
michael@0 578 }
michael@0 579 }
michael@0 580
michael@0 581 void Push(ConstantOrRegister v) {
michael@0 582 if (v.constant())
michael@0 583 Push(v.value());
michael@0 584 else
michael@0 585 Push(v.reg());
michael@0 586 }
michael@0 587
michael@0 588 void Push(const ValueOperand &val) {
michael@0 589 pushValue(val);
michael@0 590 framePushed_ += sizeof(Value);
michael@0 591 }
michael@0 592
michael@0 593 void Push(const Value &val) {
michael@0 594 pushValue(val);
michael@0 595 framePushed_ += sizeof(Value);
michael@0 596 }
michael@0 597
michael@0 598 void Push(JSValueType type, Register reg) {
michael@0 599 pushValue(type, reg);
michael@0 600 framePushed_ += sizeof(Value);
michael@0 601 }
michael@0 602
michael@0 603 void PushValue(const Address &addr) {
michael@0 604 JS_ASSERT(addr.base != StackPointer);
michael@0 605 pushValue(addr);
michael@0 606 framePushed_ += sizeof(Value);
michael@0 607 }
michael@0 608
michael@0 609 void PushEmptyRooted(VMFunction::RootType rootType);
michael@0 610 void popRooted(VMFunction::RootType rootType, Register cellReg, const ValueOperand &valueReg);
michael@0 611
michael@0 612 void adjustStack(int amount) {
michael@0 613 if (amount > 0)
michael@0 614 freeStack(amount);
michael@0 615 else if (amount < 0)
michael@0 616 reserveStack(-amount);
michael@0 617 }
michael@0 618
michael@0 619 void bumpKey(Int32Key *key, int diff) {
michael@0 620 if (key->isRegister())
michael@0 621 add32(Imm32(diff), key->reg());
michael@0 622 else
michael@0 623 key->bumpConstant(diff);
michael@0 624 }
michael@0 625
michael@0 626 void storeKey(const Int32Key &key, const Address &dest) {
michael@0 627 if (key.isRegister())
michael@0 628 store32(key.reg(), dest);
michael@0 629 else
michael@0 630 store32(Imm32(key.constant()), dest);
michael@0 631 }
michael@0 632
michael@0 633 template<typename T>
michael@0 634 void branchKey(Condition cond, const T &length, const Int32Key &key, Label *label) {
michael@0 635 if (key.isRegister())
michael@0 636 branch32(cond, length, key.reg(), label);
michael@0 637 else
michael@0 638 branch32(cond, length, Imm32(key.constant()), label);
michael@0 639 }
michael@0 640
michael@0 641 void branchTestNeedsBarrier(Condition cond, Register scratch, Label *label) {
michael@0 642 JS_ASSERT(cond == Zero || cond == NonZero);
michael@0 643 CompileZone *zone = GetIonContext()->compartment->zone();
michael@0 644 movePtr(ImmPtr(zone->addressOfNeedsBarrier()), scratch);
michael@0 645 Address needsBarrierAddr(scratch, 0);
michael@0 646 branchTest32(cond, needsBarrierAddr, Imm32(0x1), label);
michael@0 647 }
michael@0 648
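// Emits a call to the pre-write barrier for the GC edge stored at |address|.
// For MIRType_Value the barrier is skipped when the slot does not currently
// hold a GC thing. PreBarrierReg is preserved around the call and receives the
// effective address of the edge; Shape edges use the shape pre-barrier stub,
// all other supported types use the value pre-barrier stub.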
michael@0 649 template <typename T>
michael@0 650 void callPreBarrier(const T &address, MIRType type) {
michael@0 651 JS_ASSERT(type == MIRType_Value ||
michael@0 652 type == MIRType_String ||
michael@0 653 type == MIRType_Object ||
michael@0 654 type == MIRType_Shape);
michael@0 655 Label done;
michael@0 656
michael@0 657 if (type == MIRType_Value)
michael@0 658 branchTestGCThing(Assembler::NotEqual, address, &done);
michael@0 659
michael@0 660 Push(PreBarrierReg);
michael@0 661 computeEffectiveAddress(address, PreBarrierReg);
michael@0 662
michael@0 663 const JitRuntime *rt = GetIonContext()->runtime->jitRuntime();
michael@0 664 JitCode *preBarrier = (type == MIRType_Shape)
michael@0 665 ? rt->shapePreBarrier()
michael@0 666 : rt->valuePreBarrier();
michael@0 667
michael@0 668 call(preBarrier);
michael@0 669 Pop(PreBarrierReg);
michael@0 670
michael@0 671 bind(&done);
michael@0 672 }
michael@0 673
michael@0 674 template <typename T>
michael@0 675 void patchableCallPreBarrier(const T &address, MIRType type) {
michael@0 676 JS_ASSERT(type == MIRType_Value ||
michael@0 677 type == MIRType_String ||
michael@0 678 type == MIRType_Object ||
michael@0 679 type == MIRType_Shape);
michael@0 680
michael@0 681 Label done;
michael@0 682
michael@0 683 // All barriers are off by default.
michael@0 684 // They are enabled if necessary at the end of CodeGenerator::generate().
michael@0 685 CodeOffsetLabel nopJump = toggledJump(&done);
michael@0 686 writePrebarrierOffset(nopJump);
michael@0 687
michael@0 688 callPreBarrier(address, type);
michael@0 689 jump(&done);
michael@0 690
michael@0 691 align(8);
michael@0 692 bind(&done);
michael@0 693 }
michael@0 694
michael@0 695 void branchNurseryPtr(Condition cond, const Address &ptr1, const ImmMaybeNurseryPtr &ptr2,
michael@0 696 Label *label);
michael@0 697 void moveNurseryPtr(const ImmMaybeNurseryPtr &ptr, Register reg);
michael@0 698
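// If the register holds a NaN of any bit pattern, the two helpers below
// replace it with the canonical JS::GenericNaN(); otherwise the register is
// left untouched.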
michael@0 699 void canonicalizeDouble(FloatRegister reg) {
michael@0 700 Label notNaN;
michael@0 701 branchDouble(DoubleOrdered, reg, reg, &notNaN);
michael@0 702 loadConstantDouble(JS::GenericNaN(), reg);
michael@0 703 bind(&notNaN);
michael@0 704 }
michael@0 705
michael@0 706 void canonicalizeFloat(FloatRegister reg) {
michael@0 707 Label notNaN;
michael@0 708 branchFloat(DoubleOrdered, reg, reg, &notNaN);
michael@0 709 loadConstantFloat32(float(JS::GenericNaN()), reg);
michael@0 710 bind(&notNaN);
michael@0 711 }
michael@0 712
michael@0 713 template<typename T>
michael@0 714 void loadFromTypedArray(int arrayType, const T &src, AnyRegister dest, Register temp, Label *fail);
michael@0 715
michael@0 716 template<typename T>
michael@0 717 void loadFromTypedArray(int arrayType, const T &src, const ValueOperand &dest, bool allowDouble,
michael@0 718 Register temp, Label *fail);
michael@0 719
michael@0 720 template<typename S, typename T>
michael@0 721 void storeToTypedIntArray(int arrayType, const S &value, const T &dest) {
michael@0 722 switch (arrayType) {
michael@0 723 case ScalarTypeDescr::TYPE_INT8:
michael@0 724 case ScalarTypeDescr::TYPE_UINT8:
michael@0 725 case ScalarTypeDescr::TYPE_UINT8_CLAMPED:
michael@0 726 store8(value, dest);
michael@0 727 break;
michael@0 728 case ScalarTypeDescr::TYPE_INT16:
michael@0 729 case ScalarTypeDescr::TYPE_UINT16:
michael@0 730 store16(value, dest);
michael@0 731 break;
michael@0 732 case ScalarTypeDescr::TYPE_INT32:
michael@0 733 case ScalarTypeDescr::TYPE_UINT32:
michael@0 734 store32(value, dest);
michael@0 735 break;
michael@0 736 default:
michael@0 737 MOZ_ASSUME_UNREACHABLE("Invalid typed array type");
michael@0 738 }
michael@0 739 }
michael@0 740
michael@0 741 void storeToTypedFloatArray(int arrayType, const FloatRegister &value, const BaseIndex &dest);
michael@0 742 void storeToTypedFloatArray(int arrayType, const FloatRegister &value, const Address &dest);
michael@0 743
michael@0 744 Register extractString(const Address &address, Register scratch) {
michael@0 745 return extractObject(address, scratch);
michael@0 746 }
michael@0 747 Register extractString(const ValueOperand &value, Register scratch) {
michael@0 748 return extractObject(value, scratch);
michael@0 749 }
michael@0 750
michael@0 751 using MacroAssemblerSpecific::extractTag;
michael@0 752 Register extractTag(const TypedOrValueRegister &reg, Register scratch) {
michael@0 753 if (reg.hasValue())
michael@0 754 return extractTag(reg.valueReg(), scratch);
michael@0 755 mov(ImmWord(MIRTypeToTag(reg.type())), scratch);
michael@0 756 return scratch;
michael@0 757 }
michael@0 758
michael@0 759 using MacroAssemblerSpecific::extractObject;
michael@0 760 Register extractObject(const TypedOrValueRegister &reg, Register scratch) {
michael@0 761 if (reg.hasValue())
michael@0 762 return extractObject(reg.valueReg(), scratch);
michael@0 763 JS_ASSERT(reg.type() == MIRType_Object);
michael@0 764 return reg.typedReg().gpr();
michael@0 765 }
michael@0 766
michael@0 767 // Inline version of js_TypedArray_uint8_clamp_double.
michael@0 768 // This function clobbers the input register.
michael@0 769 void clampDoubleToUint8(FloatRegister input, Register output);
michael@0 770
michael@0 771 using MacroAssemblerSpecific::ensureDouble;
michael@0 772
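// Loads |source| into |dest| as a double if it holds an int32 or a double;
// otherwise branches to |failure|.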
michael@0 773 template <typename S>
michael@0 774 void ensureDouble(const S &source, FloatRegister dest, Label *failure) {
michael@0 775 Label isDouble, done;
michael@0 776 branchTestDouble(Assembler::Equal, source, &isDouble);
michael@0 777 branchTestInt32(Assembler::NotEqual, source, failure);
michael@0 778
michael@0 779 convertInt32ToDouble(source, dest);
michael@0 780 jump(&done);
michael@0 781
michael@0 782 bind(&isDouble);
michael@0 783 unboxDouble(source, dest);
michael@0 784
michael@0 785 bind(&done);
michael@0 786 }
michael@0 787
michael@0 788 // Emit type case branch on tag matching if the type tag in the definition
michael@0 789 // might actually be that type.
michael@0 790 void branchEqualTypeIfNeeded(MIRType type, MDefinition *maybeDef, Register tag, Label *label);
michael@0 791
michael@0 792 // Inline allocation.
michael@0 793 void newGCThing(Register result, Register temp, gc::AllocKind allocKind, Label *fail,
michael@0 794 gc::InitialHeap initialHeap = gc::DefaultHeap);
michael@0 795 void newGCThing(Register result, Register temp, JSObject *templateObject, Label *fail,
michael@0 796 gc::InitialHeap initialHeap);
michael@0 797 void newGCString(Register result, Register temp, Label *fail);
michael@0 798 void newGCFatInlineString(Register result, Register temp, Label *fail);
michael@0 799
michael@0 800 void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
michael@0 801 gc::AllocKind allocKind, Label *fail);
michael@0 802 void newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
michael@0 803 JSObject *templateObject, Label *fail);
michael@0 804 void newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
michael@0 805 Label *fail);
michael@0 806 void newGCFatInlineStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
michael@0 807 Label *fail);
michael@0 808
michael@0 809 void copySlotsFromTemplate(Register obj, Register temp, const JSObject *templateObj,
michael@0 810 uint32_t start, uint32_t end);
michael@0 811 void fillSlotsWithUndefined(Register obj, Register temp, const JSObject *templateObj,
michael@0 812 uint32_t start, uint32_t end);
michael@0 813 void initGCSlots(Register obj, Register temp, JSObject *templateObj);
michael@0 814 void initGCThing(Register obj, Register temp, JSObject *templateObj);
michael@0 815
michael@0 816 // Compares two strings for equality based on the JSOP.
michael@0 817 // This checks for identical pointers, atoms and length and fails for everything else.
michael@0 818 void compareStrings(JSOp op, Register left, Register right, Register result,
michael@0 819 Register temp, Label *fail);
michael@0 820
michael@0 821 // Checks the flags that signal that parallel code may need to interrupt or
michael@0 822 // abort. Branches to fail in that case.
michael@0 823 void checkInterruptFlagPar(Register tempReg, Label *fail);
michael@0 824
michael@0 825 // If the JitCode that created this assembler needs to transition into the VM,
michael@0 826 // we want to store the JitCode on the stack in order to mark it during a GC.
michael@0 827 // This is a reference to a patch location where the JitCode* will be written.
michael@0 828 private:
michael@0 829 CodeOffsetLabel exitCodePatch_;
michael@0 830
michael@0 831 public:
michael@0 832 void enterExitFrame(const VMFunction *f = nullptr) {
michael@0 833 linkExitFrame();
michael@0 834 // Push the ioncode. (Bailout or VM wrapper)
michael@0 835 exitCodePatch_ = PushWithPatch(ImmWord(-1));
michael@0 836 // Push VMFunction pointer, to mark arguments.
michael@0 837 Push(ImmPtr(f));
michael@0 838 }
michael@0 839 void enterFakeExitFrame(JitCode *codeVal = nullptr) {
michael@0 840 linkExitFrame();
michael@0 841 Push(ImmPtr(codeVal));
michael@0 842 Push(ImmPtr(nullptr));
michael@0 843 }
michael@0 844
michael@0 845 void loadThreadPool(Register pool) {
michael@0 846 // JitRuntimes are tied to JSRuntimes and there is one ThreadPool per
michael@0 847 // JSRuntime, so we can hardcode the ThreadPool address here.
michael@0 848 movePtr(ImmPtr(GetIonContext()->runtime->addressOfThreadPool()), pool);
michael@0 849 }
michael@0 850
michael@0 851 void loadForkJoinContext(Register cx, Register scratch);
michael@0 852 void loadContext(Register cxReg, Register scratch, ExecutionMode executionMode);
michael@0 853
michael@0 854 void enterParallelExitFrameAndLoadContext(const VMFunction *f, Register cx,
michael@0 855 Register scratch);
michael@0 856
michael@0 857 void enterExitFrameAndLoadContext(const VMFunction *f, Register cxReg, Register scratch,
michael@0 858 ExecutionMode executionMode);
michael@0 859
michael@0 860 void enterFakeParallelExitFrame(Register cx, Register scratch,
michael@0 861 JitCode *codeVal = nullptr);
michael@0 862
michael@0 863 void enterFakeExitFrame(Register cxReg, Register scratch,
michael@0 864 ExecutionMode executionMode,
michael@0 865 JitCode *codeVal = nullptr);
michael@0 866
michael@0 867 void leaveExitFrame() {
michael@0 868 freeStack(IonExitFooterFrame::Size());
michael@0 869 }
michael@0 870
michael@0 871 bool hasEnteredExitFrame() const {
michael@0 872 return exitCodePatch_.offset() != 0;
michael@0 873 }
michael@0 874
michael@0 875 void link(JitCode *code) {
michael@0 876 JS_ASSERT(!oom());
michael@0 877 // If this code can transition to C++ code and witness a GC, then we need to store
michael@0 878 // the JitCode onto the stack in order to GC it correctly. exitCodePatch should
michael@0 879 // be unset if the code never needed to push its JitCode*.
michael@0 880 if (hasEnteredExitFrame()) {
michael@0 881 exitCodePatch_.fixup(this);
michael@0 882 patchDataWithValueCheck(CodeLocationLabel(code, exitCodePatch_),
michael@0 883 ImmPtr(code),
michael@0 884 ImmPtr((void*)-1));
michael@0 885 }
michael@0 886
michael@0 887 }
michael@0 888
michael@0 889 // Generates code used to complete a bailout.
michael@0 890 void generateBailoutTail(Register scratch, Register bailoutInfo);
michael@0 891
michael@0 892 // These functions exist as small wrappers around sites where execution can
michael@0 893 // leave the currently running stream of instructions. They exist so that
michael@0 894 // instrumentation may be put in place around them if necessary and the
michael@0 895 // instrumentation is enabled. For the functions that return a uint32_t,
michael@0 896 // they are returning the offset of the assembler just after the call has
michael@0 897 // been made so that a safepoint can be made at that location.
michael@0 898
michael@0 899 template <typename T>
michael@0 900 void callWithABINoProfiling(const T &fun, MoveOp::Type result = MoveOp::GENERAL) {
michael@0 901 MacroAssemblerSpecific::callWithABI(fun, result);
michael@0 902 }
michael@0 903
michael@0 904 template <typename T>
michael@0 905 void callWithABI(const T &fun, MoveOp::Type result = MoveOp::GENERAL) {
michael@0 906 leaveSPSFrame();
michael@0 907 callWithABINoProfiling(fun, result);
michael@0 908 reenterSPSFrame();
michael@0 909 }
michael@0 910
michael@0 911 // see above comment for what is returned
michael@0 912 uint32_t callIon(Register callee) {
michael@0 913 leaveSPSFrame();
michael@0 914 MacroAssemblerSpecific::callIon(callee);
michael@0 915 uint32_t ret = currentOffset();
michael@0 916 reenterSPSFrame();
michael@0 917 return ret;
michael@0 918 }
michael@0 919
michael@0 920 // see above comment for what is returned
michael@0 921 uint32_t callWithExitFrame(JitCode *target) {
michael@0 922 leaveSPSFrame();
michael@0 923 MacroAssemblerSpecific::callWithExitFrame(target);
michael@0 924 uint32_t ret = currentOffset();
michael@0 925 reenterSPSFrame();
michael@0 926 return ret;
michael@0 927 }
michael@0 928
michael@0 929 // see above comment for what is returned
michael@0 930 uint32_t callWithExitFrame(JitCode *target, Register dynStack) {
michael@0 931 leaveSPSFrame();
michael@0 932 MacroAssemblerSpecific::callWithExitFrame(target, dynStack);
michael@0 933 uint32_t ret = currentOffset();
michael@0 934 reenterSPSFrame();
michael@0 935 return ret;
michael@0 936 }
michael@0 937
michael@0 938 void branchTestObjectTruthy(bool truthy, Register objReg, Register scratch,
michael@0 939 Label *slowCheck, Label *checked)
michael@0 940 {
michael@0 941 // The branches to out-of-line code here implement a conservative version
michael@0 942 // of the JSObject::isWrapper test performed in EmulatesUndefined. If none
michael@0 943 // of the branches are taken, we can check class flags directly.
michael@0 944 loadObjClass(objReg, scratch);
michael@0 945 Address flags(scratch, Class::offsetOfFlags());
michael@0 946
michael@0 947 branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_IS_PROXY), slowCheck);
michael@0 948
michael@0 949 Condition cond = truthy ? Assembler::Zero : Assembler::NonZero;
michael@0 950 branchTest32(cond, flags, Imm32(JSCLASS_EMULATES_UNDEFINED), checked);
michael@0 951 }
michael@0 952
michael@0 953 private:
michael@0 954 // These two functions are helpers used around call sites throughout the
michael@0 955 // assembler. They are called from the above call wrappers to emit the
michael@0 956 // necessary instrumentation.
michael@0 957 void leaveSPSFrame() {
michael@0 958 if (!sps_ || !sps_->enabled())
michael@0 959 return;
michael@0 960 // No registers are guaranteed to be available, so push/pop a register
michael@0 961 // so we can use one
michael@0 962 push(CallTempReg0);
michael@0 963 sps_->leave(*this, CallTempReg0);
michael@0 964 pop(CallTempReg0);
michael@0 965 }
michael@0 966
michael@0 967 void reenterSPSFrame() {
michael@0 968 if (!sps_ || !sps_->enabled())
michael@0 969 return;
michael@0 970 // Attempt to use a now-free register within a given set, but if the
michael@0 971 // architecture being built doesn't have an available register, resort
michael@0 972 // to push/pop
michael@0 973 GeneralRegisterSet regs(Registers::TempMask & ~Registers::JSCallMask &
michael@0 974 ~Registers::CallMask);
michael@0 975 if (regs.empty()) {
michael@0 976 push(CallTempReg0);
michael@0 977 sps_->reenter(*this, CallTempReg0);
michael@0 978 pop(CallTempReg0);
michael@0 979 } else {
michael@0 980 sps_->reenter(*this, regs.getAny());
michael@0 981 }
michael@0 982 }
michael@0 983
michael@0 984 void spsProfileEntryAddress(SPSProfiler *p, int offset, Register temp,
michael@0 985 Label *full)
michael@0 986 {
michael@0 987 movePtr(ImmPtr(p->sizePointer()), temp);
michael@0 988 load32(Address(temp, 0), temp);
michael@0 989 if (offset != 0)
michael@0 990 add32(Imm32(offset), temp);
michael@0 991 branch32(Assembler::GreaterThanOrEqual, temp, Imm32(p->maxSize()), full);
michael@0 992
michael@0 993 // 4 * sizeof(void*) * idx = idx << (2 + log(sizeof(void*)))
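// (Worked example: with 4-byte pointers an entry is 16 bytes and the shift is
// 2 + 2 = 4; with 8-byte pointers it is 32 bytes and the shift is 2 + 3 = 5,
// matching the expression below.)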
michael@0 994 JS_STATIC_ASSERT(sizeof(ProfileEntry) == 4 * sizeof(void*));
michael@0 995 lshiftPtr(Imm32(2 + (sizeof(void*) == 4 ? 2 : 3)), temp);
michael@0 996 addPtr(ImmPtr(p->stack()), temp);
michael@0 997 }
michael@0 998
michael@0 999 // The safe version of the above method refrains from assuming that the fields
michael@0 1000 // of the SPSProfiler class are going to stay the same across different runs of
michael@0 1001 // the jitcode. Ion can use the more efficient unsafe version because ion jitcode
michael@0 1002 will not survive changes to the profiler settings. Baseline jitcode, however,
michael@0 1003 // can span these changes, so any hardcoded field values will be incorrect afterwards.
michael@0 1004 // All the sps-related methods used by baseline call |spsProfileEntryAddressSafe|.
michael@0 1005 void spsProfileEntryAddressSafe(SPSProfiler *p, int offset, Register temp,
michael@0 1006 Label *full)
michael@0 1007 {
michael@0 1008 // Load size pointer
michael@0 1009 loadPtr(AbsoluteAddress(p->addressOfSizePointer()), temp);
michael@0 1010
michael@0 1011 // Load size
michael@0 1012 load32(Address(temp, 0), temp);
michael@0 1013 if (offset != 0)
michael@0 1014 add32(Imm32(offset), temp);
michael@0 1015
michael@0 1016 // Test against max size.
michael@0 1017 branch32(Assembler::LessThanOrEqual, AbsoluteAddress(p->addressOfMaxSize()), temp, full);
michael@0 1018
michael@0 1019 // 4 * sizeof(void*) * idx = idx << (2 + log(sizeof(void*)))
michael@0 1020 JS_STATIC_ASSERT(sizeof(ProfileEntry) == 4 * sizeof(void*));
michael@0 1021 lshiftPtr(Imm32(2 + (sizeof(void*) == 4 ? 2 : 3)), temp);
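// No second scratch register is available here, so spill the scaled index,
// load the stack base over it into |temp|, add the spilled index back in from
// memory, and then discard the spill slot by bumping StackPointer directly.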
michael@0 1022 push(temp);
michael@0 1023 loadPtr(AbsoluteAddress(p->addressOfStack()), temp);
michael@0 1024 addPtr(Address(StackPointer, 0), temp);
michael@0 1025 addPtr(Imm32(sizeof(size_t)), StackPointer);
michael@0 1026 }
michael@0 1027
michael@0 1028 public:
michael@0 1029 // These functions are needed by the IonInstrumentation interface defined in
michael@0 1030 // vm/SPSProfiler.h. They will modify the pseudostack provided to SPS to
michael@0 1031 // perform the actual instrumentation.
michael@0 1032
michael@0 1033 void spsUpdatePCIdx(SPSProfiler *p, int32_t idx, Register temp) {
michael@0 1034 Label stackFull;
michael@0 1035 spsProfileEntryAddress(p, -1, temp, &stackFull);
michael@0 1036 store32(Imm32(idx), Address(temp, ProfileEntry::offsetOfPCIdx()));
michael@0 1037 bind(&stackFull);
michael@0 1038 }
michael@0 1039
michael@0 1040 void spsUpdatePCIdx(SPSProfiler *p, Register idx, Register temp) {
michael@0 1041 Label stackFull;
michael@0 1042 spsProfileEntryAddressSafe(p, -1, temp, &stackFull);
michael@0 1043 store32(idx, Address(temp, ProfileEntry::offsetOfPCIdx()));
michael@0 1044 bind(&stackFull);
michael@0 1045 }
michael@0 1046
michael@0 1047 // spsPushFrame variant for Ion-optimized scripts.
michael@0 1048 void spsPushFrame(SPSProfiler *p, const char *str, JSScript *s, Register temp) {
michael@0 1049 Label stackFull;
michael@0 1050 spsProfileEntryAddress(p, 0, temp, &stackFull);
michael@0 1051
michael@0 1052 storePtr(ImmPtr(str), Address(temp, ProfileEntry::offsetOfString()));
michael@0 1053 storePtr(ImmGCPtr(s), Address(temp, ProfileEntry::offsetOfScript()));
michael@0 1054 storePtr(ImmPtr((void*) ProfileEntry::SCRIPT_OPT_STACKPOINTER),
michael@0 1055 Address(temp, ProfileEntry::offsetOfStackAddress()));
michael@0 1056 store32(Imm32(ProfileEntry::NullPCIndex), Address(temp, ProfileEntry::offsetOfPCIdx()));
michael@0 1057
michael@0 1058 /* Always increment the stack size, whether or not we actually pushed. */
michael@0 1059 bind(&stackFull);
michael@0 1060 movePtr(ImmPtr(p->sizePointer()), temp);
michael@0 1061 add32(Imm32(1), Address(temp, 0));
michael@0 1062 }
michael@0 1063
michael@0 1064 // spsPushFrame variant for Baseline-optimized scripts.
michael@0 1065 void spsPushFrame(SPSProfiler *p, const Address &str, const Address &script,
michael@0 1066 Register temp, Register temp2)
michael@0 1067 {
michael@0 1068 Label stackFull;
michael@0 1069 spsProfileEntryAddressSafe(p, 0, temp, &stackFull);
michael@0 1070
michael@0 1071 loadPtr(str, temp2);
michael@0 1072 storePtr(temp2, Address(temp, ProfileEntry::offsetOfString()));
michael@0 1073
michael@0 1074 loadPtr(script, temp2);
michael@0 1075 storePtr(temp2, Address(temp, ProfileEntry::offsetOfScript()));
michael@0 1076
michael@0 1077 storePtr(ImmPtr(nullptr), Address(temp, ProfileEntry::offsetOfStackAddress()));
michael@0 1078
michael@0 1079 // Store 0 for PCIdx because that's what the interpreter does.
michael@0 1080 // (See probes::EnterScript, which calls spsProfiler.enter, which pushes an entry
michael@0 1081 // with 0 pcIdx).
michael@0 1082 store32(Imm32(0), Address(temp, ProfileEntry::offsetOfPCIdx()));
michael@0 1083
michael@0 1084 /* Always increment the stack size, whether or not we actually pushed. */
michael@0 1085 bind(&stackFull);
michael@0 1086 movePtr(ImmPtr(p->addressOfSizePointer()), temp);
michael@0 1087 loadPtr(Address(temp, 0), temp);
michael@0 1088 add32(Imm32(1), Address(temp, 0));
michael@0 1089 }
michael@0 1090
michael@0 1091 void spsPopFrame(SPSProfiler *p, Register temp) {
michael@0 1092 movePtr(ImmPtr(p->sizePointer()), temp);
michael@0 1093 add32(Imm32(-1), Address(temp, 0));
michael@0 1094 }
michael@0 1095
michael@0 1096 // spsPopFrameSafe does not assume |profiler->sizePointer()| will stay constant.
michael@0 1097 void spsPopFrameSafe(SPSProfiler *p, Register temp) {
michael@0 1098 loadPtr(AbsoluteAddress(p->addressOfSizePointer()), temp);
michael@0 1099 add32(Imm32(-1), Address(temp, 0));
michael@0 1100 }
michael@0 1101
michael@0 1102 static const char enterJitLabel[];
michael@0 1103 void spsMarkJit(SPSProfiler *p, Register framePtr, Register temp);
michael@0 1104 void spsUnmarkJit(SPSProfiler *p, Register temp);
michael@0 1105
michael@0 1106 void loadBaselineOrIonRaw(Register script, Register dest, ExecutionMode mode, Label *failure);
michael@0 1107 void loadBaselineOrIonNoArgCheck(Register callee, Register dest, ExecutionMode mode, Label *failure);
michael@0 1108
michael@0 1109 void loadBaselineFramePtr(Register framePtr, Register dest);
michael@0 1110
michael@0 1111 void pushBaselineFramePtr(Register framePtr, Register scratch) {
michael@0 1112 loadBaselineFramePtr(framePtr, scratch);
michael@0 1113 push(scratch);
michael@0 1114 }
michael@0 1115
michael@0 1116 private:
michael@0 1117 void handleFailure(ExecutionMode executionMode);
michael@0 1118
michael@0 1119 public:
michael@0 1120 Label *exceptionLabel() {
michael@0 1121 // Exceptions are currently handled the same way as sequential failures.
michael@0 1122 return &sequentialFailureLabel_;
michael@0 1123 }
michael@0 1124
michael@0 1125 Label *failureLabel(ExecutionMode executionMode) {
michael@0 1126 switch (executionMode) {
michael@0 1127 case SequentialExecution: return &sequentialFailureLabel_;
michael@0 1128 case ParallelExecution: return &parallelFailureLabel_;
michael@0 1129 default: MOZ_ASSUME_UNREACHABLE("Unexpected execution mode");
michael@0 1130 }
michael@0 1131 }
michael@0 1132
michael@0 1133 void finish();
michael@0 1134
michael@0 1135 void assumeUnreachable(const char *output);
michael@0 1136 void printf(const char *output);
michael@0 1137 void printf(const char *output, Register value);
michael@0 1138
michael@0 1139 #ifdef JS_TRACE_LOGGING
michael@0 1140 void tracelogStart(Register logger, uint32_t textId);
michael@0 1141 void tracelogStart(Register logger, Register textId);
michael@0 1142 void tracelogStop(Register logger, uint32_t textId);
michael@0 1143 void tracelogStop(Register logger, Register textId);
michael@0 1144 void tracelogStop(Register logger);
michael@0 1145 #endif
michael@0 1146
michael@0 1147 #define DISPATCH_FLOATING_POINT_OP(method, type, arg1d, arg1f, arg2) \
michael@0 1148 JS_ASSERT(IsFloatingPointType(type)); \
michael@0 1149 if (type == MIRType_Double) \
michael@0 1150 method##Double(arg1d, arg2); \
michael@0 1151 else \
michael@0 1152 method##Float32(arg1f, arg2); \
michael@0 1153
michael@0 1154 void loadConstantFloatingPoint(double d, float f, FloatRegister dest, MIRType destType) {
michael@0 1155 DISPATCH_FLOATING_POINT_OP(loadConstant, destType, d, f, dest);
michael@0 1156 }
michael@0 1157 void boolValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
michael@0 1158 DISPATCH_FLOATING_POINT_OP(boolValueTo, destType, value, value, dest);
michael@0 1159 }
michael@0 1160 void int32ValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
michael@0 1161 DISPATCH_FLOATING_POINT_OP(int32ValueTo, destType, value, value, dest);
michael@0 1162 }
michael@0 1163 void convertInt32ToFloatingPoint(Register src, FloatRegister dest, MIRType destType) {
michael@0 1164 DISPATCH_FLOATING_POINT_OP(convertInt32To, destType, src, src, dest);
michael@0 1165 }
michael@0 1166
michael@0 1167 #undef DISPATCH_FLOATING_POINT_OP
michael@0 1168
michael@0 1169 void convertValueToFloatingPoint(ValueOperand value, FloatRegister output, Label *fail,
michael@0 1170 MIRType outputType);
michael@0 1171 bool convertValueToFloatingPoint(JSContext *cx, const Value &v, FloatRegister output,
michael@0 1172 Label *fail, MIRType outputType);
michael@0 1173 bool convertConstantOrRegisterToFloatingPoint(JSContext *cx, ConstantOrRegister src,
michael@0 1174 FloatRegister output, Label *fail,
michael@0 1175 MIRType outputType);
michael@0 1176 void convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
michael@0 1177 Label *fail, MIRType outputType);
michael@0 1178
michael@0 1179 void convertInt32ValueToDouble(const Address &address, Register scratch, Label *done);
michael@0 1180 void convertValueToDouble(ValueOperand value, FloatRegister output, Label *fail) {
michael@0 1181 convertValueToFloatingPoint(value, output, fail, MIRType_Double);
michael@0 1182 }
michael@0 1183 bool convertValueToDouble(JSContext *cx, const Value &v, FloatRegister output, Label *fail) {
michael@0 1184 return convertValueToFloatingPoint(cx, v, output, fail, MIRType_Double);
michael@0 1185 }
michael@0 1186 bool convertConstantOrRegisterToDouble(JSContext *cx, ConstantOrRegister src,
michael@0 1187 FloatRegister output, Label *fail)
michael@0 1188 {
michael@0 1189 return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType_Double);
michael@0 1190 }
michael@0 1191 void convertTypedOrValueToDouble(TypedOrValueRegister src, FloatRegister output, Label *fail) {
michael@0 1192 convertTypedOrValueToFloatingPoint(src, output, fail, MIRType_Double);
michael@0 1193 }
michael@0 1194
michael@0 1195 void convertValueToFloat(ValueOperand value, FloatRegister output, Label *fail) {
michael@0 1196 convertValueToFloatingPoint(value, output, fail, MIRType_Float32);
michael@0 1197 }
michael@0 1198 bool convertValueToFloat(JSContext *cx, const Value &v, FloatRegister output, Label *fail) {
michael@0 1199 return convertValueToFloatingPoint(cx, v, output, fail, MIRType_Float32);
michael@0 1200 }
michael@0 1201 bool convertConstantOrRegisterToFloat(JSContext *cx, ConstantOrRegister src,
michael@0 1202 FloatRegister output, Label *fail)
michael@0 1203 {
michael@0 1204 return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType_Float32);
michael@0 1205 }
michael@0 1206 void convertTypedOrValueToFloat(TypedOrValueRegister src, FloatRegister output, Label *fail) {
michael@0 1207 convertTypedOrValueToFloatingPoint(src, output, fail, MIRType_Float32);
michael@0 1208 }
michael@0 1209
michael@0 1210 enum IntConversionBehavior {
michael@0 1211 IntConversion_Normal,
michael@0 1212 IntConversion_NegativeZeroCheck,
michael@0 1213 IntConversion_Truncate,
michael@0 1214 IntConversion_ClampToUint8,
michael@0 1215 };
michael@0 1216
michael@0 1217 enum IntConversionInputKind {
michael@0 1218 IntConversion_NumbersOnly,
michael@0 1219 IntConversion_NumbersOrBoolsOnly,
michael@0 1220 IntConversion_Any
michael@0 1221 };
michael@0 1222
michael@0 1223 //
michael@0 1224 // Functions for converting values to int.
michael@0 1225 //
michael@0 1226 void convertDoubleToInt(FloatRegister src, Register output, FloatRegister temp,
michael@0 1227 Label *truncateFail, Label *fail, IntConversionBehavior behavior);
michael@0 1228
michael@0 1229 // Strings may be handled by providing labels to jump to when the behavior
michael@0 1230 // is truncation or clamping. The subroutine, usually an OOL call, is
michael@0 1231 // passed the unboxed string in |stringReg| and should convert it to a
michael@0 1232 // double store into |temp|.
michael@0 1233 void convertValueToInt(ValueOperand value, MDefinition *input,
michael@0 1234 Label *handleStringEntry, Label *handleStringRejoin,
michael@0 1235 Label *truncateDoubleSlow,
michael@0 1236 Register stringReg, FloatRegister temp, Register output,
michael@0 1237 Label *fail, IntConversionBehavior behavior,
michael@0 1238 IntConversionInputKind conversion = IntConversion_Any);
michael@0 1239 void convertValueToInt(ValueOperand value, FloatRegister temp, Register output, Label *fail,
michael@0 1240 IntConversionBehavior behavior)
michael@0 1241 {
michael@0 1242 convertValueToInt(value, nullptr, nullptr, nullptr, nullptr, InvalidReg, temp, output,
michael@0 1243 fail, behavior);
michael@0 1244 }
michael@0 1245 bool convertValueToInt(JSContext *cx, const Value &v, Register output, Label *fail,
michael@0 1246 IntConversionBehavior behavior);
michael@0 1247 bool convertConstantOrRegisterToInt(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
michael@0 1248 Register output, Label *fail, IntConversionBehavior behavior);
michael@0 1249 void convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister temp, Register output,
michael@0 1250 Label *fail, IntConversionBehavior behavior);
michael@0 1251
michael@0 1252 //
michael@0 1253 // Convenience functions for converting values to int32.
michael@0 1254 //
michael@0 1255 void convertValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label *fail,
michael@0 1256 bool negativeZeroCheck)
michael@0 1257 {
michael@0 1258 convertValueToInt(value, temp, output, fail, negativeZeroCheck
michael@0 1259 ? IntConversion_NegativeZeroCheck
michael@0 1260 : IntConversion_Normal);
michael@0 1261 }
michael@0 1262 void convertValueToInt32(ValueOperand value, MDefinition *input,
michael@0 1263 FloatRegister temp, Register output, Label *fail,
michael@0 1264 bool negativeZeroCheck, IntConversionInputKind conversion = IntConversion_Any)
michael@0 1265 {
michael@0 1266 convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
michael@0 1267 negativeZeroCheck
michael@0 1268 ? IntConversion_NegativeZeroCheck
michael@0 1269 : IntConversion_Normal,
michael@0 1270 conversion);
michael@0 1271 }
michael@0 1272 bool convertValueToInt32(JSContext *cx, const Value &v, Register output, Label *fail,
michael@0 1273 bool negativeZeroCheck)
michael@0 1274 {
michael@0 1275 return convertValueToInt(cx, v, output, fail, negativeZeroCheck
michael@0 1276 ? IntConversion_NegativeZeroCheck
michael@0 1277 : IntConversion_Normal);
michael@0 1278 }
michael@0 1279 bool convertConstantOrRegisterToInt32(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
michael@0 1280 Register output, Label *fail, bool negativeZeroCheck)
michael@0 1281 {
michael@0 1282 return convertConstantOrRegisterToInt(cx, src, temp, output, fail, negativeZeroCheck
michael@0 1283 ? IntConversion_NegativeZeroCheck
michael@0 1284 : IntConversion_Normal);
michael@0 1285 }
michael@0 1286 void convertTypedOrValueToInt32(TypedOrValueRegister src, FloatRegister temp, Register output,
michael@0 1287 Label *fail, bool negativeZeroCheck)
michael@0 1288 {
michael@0 1289 convertTypedOrValueToInt(src, temp, output, fail, negativeZeroCheck
michael@0 1290 ? IntConversion_NegativeZeroCheck
michael@0 1291 : IntConversion_Normal);
michael@0 1292 }
michael@0 1293
michael@0 1294 //
michael@0 1295 // Convenience functions for truncating values to int32.
michael@0 1296 //
michael@0 1297 void truncateValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label *fail) {
michael@0 1298 convertValueToInt(value, temp, output, fail, IntConversion_Truncate);
michael@0 1299 }
michael@0 1300 void truncateValueToInt32(ValueOperand value, MDefinition *input,
michael@0 1301 Label *handleStringEntry, Label *handleStringRejoin,
michael@0 1302 Label *truncateDoubleSlow,
michael@0 1303 Register stringReg, FloatRegister temp, Register output, Label *fail)
michael@0 1304 {
michael@0 1305 convertValueToInt(value, input, handleStringEntry, handleStringRejoin, truncateDoubleSlow,
michael@0 1306 stringReg, temp, output, fail, IntConversion_Truncate);
michael@0 1307 }
michael@0 1308 void truncateValueToInt32(ValueOperand value, MDefinition *input,
michael@0 1309 FloatRegister temp, Register output, Label *fail)
michael@0 1310 {
michael@0 1311 convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
michael@0 1312 IntConversion_Truncate);
michael@0 1313 }
michael@0 1314 bool truncateValueToInt32(JSContext *cx, const Value &v, Register output, Label *fail) {
michael@0 1315 return convertValueToInt(cx, v, output, fail, IntConversion_Truncate);
michael@0 1316 }
michael@0 1317 bool truncateConstantOrRegisterToInt32(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
michael@0 1318 Register output, Label *fail)
michael@0 1319 {
michael@0 1320 return convertConstantOrRegisterToInt(cx, src, temp, output, fail, IntConversion_Truncate);
michael@0 1321 }
michael@0 1322 void truncateTypedOrValueToInt32(TypedOrValueRegister src, FloatRegister temp, Register output,
michael@0 1323 Label *fail)
michael@0 1324 {
michael@0 1325 convertTypedOrValueToInt(src, temp, output, fail, IntConversion_Truncate);
michael@0 1326 }
michael@0 1327
michael@0 1328 // Convenience functions for clamping values to uint8.
michael@0 1329 void clampValueToUint8(ValueOperand value, FloatRegister temp, Register output, Label *fail) {
michael@0 1330 convertValueToInt(value, temp, output, fail, IntConversion_ClampToUint8);
michael@0 1331 }
michael@0 1332 void clampValueToUint8(ValueOperand value, MDefinition *input,
michael@0 1333 Label *handleStringEntry, Label *handleStringRejoin,
michael@0 1334 Register stringReg, FloatRegister temp, Register output, Label *fail)
michael@0 1335 {
michael@0 1336 convertValueToInt(value, input, handleStringEntry, handleStringRejoin, nullptr,
michael@0 1337 stringReg, temp, output, fail, IntConversion_ClampToUint8);
michael@0 1338 }
michael@0 1339 void clampValueToUint8(ValueOperand value, MDefinition *input,
michael@0 1340 FloatRegister temp, Register output, Label *fail)
michael@0 1341 {
michael@0 1342 convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
michael@0 1343 IntConversion_ClampToUint8);
michael@0 1344 }
michael@0 1345 bool clampValueToUint8(JSContext *cx, const Value &v, Register output, Label *fail) {
michael@0 1346 return convertValueToInt(cx, v, output, fail, IntConversion_ClampToUint8);
michael@0 1347 }
michael@0 1348 bool clampConstantOrRegisterToUint8(JSContext *cx, ConstantOrRegister src, FloatRegister temp,
michael@0 1349 Register output, Label *fail)
michael@0 1350 {
michael@0 1351 return convertConstantOrRegisterToInt(cx, src, temp, output, fail,
michael@0 1352 IntConversion_ClampToUint8);
michael@0 1353 }
michael@0 1354 void clampTypedOrValueToUint8(TypedOrValueRegister src, FloatRegister temp, Register output,
michael@0 1355 Label *fail)
michael@0 1356 {
michael@0 1357 convertTypedOrValueToInt(src, temp, output, fail, IntConversion_ClampToUint8);
michael@0 1358 }
michael@0 1359
michael@0 1360 public:
michael@0 1361 class AfterICSaveLive {
michael@0 1362 friend class MacroAssembler;
michael@0 1363 AfterICSaveLive(uint32_t initialStack)
michael@0 1364 #ifdef JS_DEBUG
michael@0 1365 : initialStack(initialStack)
michael@0 1366 #endif
michael@0 1367 {}
michael@0 1368
michael@0 1369 #ifdef JS_DEBUG
michael@0 1370 public:
michael@0 1371 uint32_t initialStack;
michael@0 1372 #endif
michael@0 1373 };
michael@0 1374
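// Helpers for IC stubs that call into the VM: icSaveLive pushes the live
// registers and records the resulting stack depth, icBuildOOLFakeExitFrame
// builds the fake exit frame needed for the call, and icRestoreLive pops the
// registers again (asserting, in debug builds, that the stack depth is
// unchanged). A typical sequence is sketched below; |liveRegs| and
// |returnAddr| are illustrative names:
//
//   AfterICSaveLive aic = masm.icSaveLive(liveRegs);
//   if (!masm.icBuildOOLFakeExitFrame(returnAddr, aic))
//       return false;
//   ... emit the VM call ...
//   masm.icRestoreLive(liveRegs, aic);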
michael@0 1375 AfterICSaveLive icSaveLive(RegisterSet &liveRegs) {
michael@0 1376 PushRegsInMask(liveRegs);
michael@0 1377 return AfterICSaveLive(framePushed());
michael@0 1378 }
michael@0 1379
michael@0 1380 bool icBuildOOLFakeExitFrame(void *fakeReturnAddr, AfterICSaveLive &aic) {
michael@0 1381 return buildOOLFakeExitFrame(fakeReturnAddr);
michael@0 1382 }
michael@0 1383
michael@0 1384 void icRestoreLive(RegisterSet &liveRegs, AfterICSaveLive &aic) {
michael@0 1385 JS_ASSERT(framePushed() == aic.initialStack);
michael@0 1386 PopRegsInMask(liveRegs);
michael@0 1387 }
michael@0 1388 };
michael@0 1389
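// Maps a JS comparison op to the floating-point condition used to implement
// it. JSOP_NE/JSOP_STRICTNE map to DoubleNotEqualOrUnordered so that a
// comparison involving NaN yields true for !=, while the ordered conditions
// used for the other ops yield false when either operand is NaN, matching JS
// semantics.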
michael@0 1390 static inline Assembler::DoubleCondition
michael@0 1391 JSOpToDoubleCondition(JSOp op)
michael@0 1392 {
michael@0 1393 switch (op) {
michael@0 1394 case JSOP_EQ:
michael@0 1395 case JSOP_STRICTEQ:
michael@0 1396 return Assembler::DoubleEqual;
michael@0 1397 case JSOP_NE:
michael@0 1398 case JSOP_STRICTNE:
michael@0 1399 return Assembler::DoubleNotEqualOrUnordered;
michael@0 1400 case JSOP_LT:
michael@0 1401 return Assembler::DoubleLessThan;
michael@0 1402 case JSOP_LE:
michael@0 1403 return Assembler::DoubleLessThanOrEqual;
michael@0 1404 case JSOP_GT:
michael@0 1405 return Assembler::DoubleGreaterThan;
michael@0 1406 case JSOP_GE:
michael@0 1407 return Assembler::DoubleGreaterThanOrEqual;
michael@0 1408 default:
michael@0 1409 MOZ_ASSUME_UNREACHABLE("Unexpected comparison operation");
michael@0 1410 }
michael@0 1411 }
michael@0 1412
michael@0 1413 // Note: the op may have been inverted during lowering (to put constants in a
michael@0 1414 // position where they can be immediates), so it is important to use
michael@0 1415 // lir->jsop() instead of mir->jsop() when it is present.
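// When isSigned is true the signed integer conditions (LessThan,
// GreaterThan, ...) are used; otherwise their unsigned counterparts (Below,
// Above, ...) are used.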
michael@0 1416 static inline Assembler::Condition
michael@0 1417 JSOpToCondition(JSOp op, bool isSigned)
michael@0 1418 {
michael@0 1419 if (isSigned) {
michael@0 1420 switch (op) {
michael@0 1421 case JSOP_EQ:
michael@0 1422 case JSOP_STRICTEQ:
michael@0 1423 return Assembler::Equal;
michael@0 1424 case JSOP_NE:
michael@0 1425 case JSOP_STRICTNE:
michael@0 1426 return Assembler::NotEqual;
michael@0 1427 case JSOP_LT:
michael@0 1428 return Assembler::LessThan;
michael@0 1429 case JSOP_LE:
michael@0 1430 return Assembler::LessThanOrEqual;
michael@0 1431 case JSOP_GT:
michael@0 1432 return Assembler::GreaterThan;
michael@0 1433 case JSOP_GE:
michael@0 1434 return Assembler::GreaterThanOrEqual;
michael@0 1435 default:
michael@0 1436 MOZ_ASSUME_UNREACHABLE("Unrecognized comparison operation");
michael@0 1437 }
michael@0 1438 } else {
michael@0 1439 switch (op) {
michael@0 1440 case JSOP_EQ:
michael@0 1441 case JSOP_STRICTEQ:
michael@0 1442 return Assembler::Equal;
michael@0 1443 case JSOP_NE:
michael@0 1444 case JSOP_STRICTNE:
michael@0 1445 return Assembler::NotEqual;
michael@0 1446 case JSOP_LT:
michael@0 1447 return Assembler::Below;
michael@0 1448 case JSOP_LE:
michael@0 1449 return Assembler::BelowOrEqual;
michael@0 1450 case JSOP_GT:
michael@0 1451 return Assembler::Above;
michael@0 1452 case JSOP_GE:
michael@0 1453 return Assembler::AboveOrEqual;
michael@0 1454 default:
michael@0 1455 MOZ_ASSUME_UNREACHABLE("Unrecognized comparison operation");
michael@0 1456 }
michael@0 1457 }
michael@0 1458 }
michael@0 1459
michael@0 1460 } // namespace jit
michael@0 1461 } // namespace js
michael@0 1462
michael@0 1463 #endif // JS_ION
michael@0 1464
michael@0 1465 #endif /* jit_IonMacroAssembler_h */
