js/src/jit/IonMacroAssembler.cpp

Sat, 03 Jan 2015 20:18:00 +0100

author
Michael Schloh von Bennewitz <michael@schloh.com>
date
Sat, 03 Jan 2015 20:18:00 +0100
branch
TOR_BUG_3246
changeset 7
129ffea94266
permissions
-rw-r--r--

Conditionally enable double key logic according to:
private browsing mode or privacy.thirdparty.isolate preference and
implement in GetCookieStringCommon and FindCookie where it counts...
There are open questions about how to convince FindCookie callers to test
the condition and pass a nullptr when double-key logic is disabled.

     1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     2  * vim: set ts=8 sts=4 et sw=4 tw=99:
     3  * This Source Code Form is subject to the terms of the Mozilla Public
     4  * License, v. 2.0. If a copy of the MPL was not distributed with this
     5  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     7 #include "jit/IonMacroAssembler.h"
     9 #include "jsinfer.h"
    10 #include "jsprf.h"
    12 #include "builtin/TypedObject.h"
    13 #include "jit/Bailouts.h"
    14 #include "jit/BaselineFrame.h"
    15 #include "jit/BaselineIC.h"
    16 #include "jit/BaselineJIT.h"
    17 #include "jit/Lowering.h"
    18 #include "jit/MIR.h"
    19 #include "jit/ParallelFunctions.h"
    20 #include "vm/ForkJoin.h"
    21 #include "vm/TraceLogging.h"
    23 #ifdef JSGC_GENERATIONAL
    24 # include "jsgcinlines.h"
    25 #endif
    26 #include "jsinferinlines.h"
    27 #include "jsobjinlines.h"
    29 using namespace js;
    30 using namespace js::jit;
    32 using JS::GenericNaN;
    34 namespace {
    36 // Emulate a TypeSet logic from a Type object to avoid duplicating the guard
    37 // logic.
    38 class TypeWrapper {
    39     types::Type t_;
    41   public:
    42     TypeWrapper(types::Type t) : t_(t) {}
    44     inline bool unknown() const {
    45         return t_.isUnknown();
    46     }
    47     inline bool hasType(types::Type t) const {
    48         if (t == types::Type::Int32Type())
    49             return t == t_ || t_ == types::Type::DoubleType();
    50         return t == t_;
    51     }
    52     inline unsigned getObjectCount() const {
    53         if (t_.isAnyObject() || t_.isUnknown() || !t_.isObject())
    54             return 0;
    55         return 1;
    56     }
    57     inline JSObject *getSingleObject(unsigned) const {
    58         if (t_.isSingleObject())
    59             return t_.singleObject();
    60         return nullptr;
    61     }
    62     inline types::TypeObject *getTypeObject(unsigned) const {
    63         if (t_.isTypeObject())
    64             return t_.typeObject();
    65         return nullptr;
    66     }
    67 };
    69 } /* anonymous namespace */
// Emit a guard that the value at |address| is a member of the type set
// |types|; execution falls through on a match and jumps to |miss|
// otherwise. |scratch| may be clobbered. TypeSet is any class exposing
// the TypeSet query interface (including the file-local TypeWrapper).
template <typename Source, typename TypeSet> void
MacroAssembler::guardTypeSet(const Source &address, const TypeSet *types,
                             Register scratch, Label *miss)
{
    JS_ASSERT(!types->unknown());

    Label matched;
    // Candidate tag tests for every primitive type plus "any object".
    types::Type tests[7] = {
        types::Type::Int32Type(),
        types::Type::UndefinedType(),
        types::Type::BooleanType(),
        types::Type::StringType(),
        types::Type::NullType(),
        types::Type::MagicArgType(),
        types::Type::AnyObjectType()
    };

    // The double type also implies Int32.
    // So replace the int32 test with the double one.
    if (types->hasType(types::Type::DoubleType())) {
        JS_ASSERT(types->hasType(types::Type::Int32Type()));
        tests[0] = types::Type::DoubleType();
    }

    Register tag = extractTag(address, scratch);

    // Emit all typed tests. Each matching type's branch is kept pending in
    // |lastBranch| and only emitted once a later test needs one, so the
    // final branch can be inverted below rather than being followed by an
    // unconditional jump.
    BranchType lastBranch;
    for (size_t i = 0; i < 7; i++) {
        if (!types->hasType(tests[i]))
            continue;

        if (lastBranch.isInitialized())
            lastBranch.emit(*this);
        lastBranch = BranchType(Equal, tag, tests[i], &matched);
    }

    // If this is the last check, invert the last branch.
    if (types->hasType(types::Type::AnyObjectType()) || !types->getObjectCount()) {
        // No pending branch means nothing in the set could match: always miss.
        if (!lastBranch.isInitialized()) {
            jump(miss);
            return;
        }

        lastBranch.invertCondition();
        lastBranch.relink(miss);
        lastBranch.emit(*this);

        bind(&matched);
        return;
    }

    if (lastBranch.isInitialized())
        lastBranch.emit(*this);

    // Test specific objects.
    JS_ASSERT(scratch != InvalidReg);
    branchTestObject(NotEqual, tag, miss);
    Register obj = extractObject(address, scratch);
    guardObjectType(obj, types, scratch, miss);

    bind(&matched);
}
// Emit a guard that the object in |obj| is one of the specific single
// objects or type objects listed in |types|, jumping to |miss| otherwise.
// |scratch| is clobbered when type objects are present, and on some
// platforms |obj| and |scratch| may be the same register (see below).
template <typename TypeSet> void
MacroAssembler::guardObjectType(Register obj, const TypeSet *types,
                                Register scratch, Label *miss)
{
    JS_ASSERT(!types->unknown());
    JS_ASSERT(!types->hasType(types::Type::AnyObjectType()));
    JS_ASSERT(types->getObjectCount());
    JS_ASSERT(scratch != InvalidReg);

    Label matched;

    // As in guardTypeSet, branches are kept pending in |lastBranch| so the
    // final one can be inverted instead of being followed by a jump.
    BranchGCPtr lastBranch;
    JS_ASSERT(!lastBranch.isInitialized());
    bool hasTypeObjects = false;
    unsigned count = types->getObjectCount();
    for (unsigned i = 0; i < count; i++) {
        // Entries that are not single objects are handled in the second
        // pass below (after |obj|'s type has been loaded into scratch).
        if (!types->getSingleObject(i)) {
            hasTypeObjects = hasTypeObjects || types->getTypeObject(i);
            continue;
        }

        if (lastBranch.isInitialized())
            lastBranch.emit(*this);

        JSObject *object = types->getSingleObject(i);
        lastBranch = BranchGCPtr(Equal, obj, ImmGCPtr(object), &matched);
    }

    if (hasTypeObjects) {
        // We are possibly going to overwrite the obj register. So already
        // emit the branch, since branch depends on previous value of obj
        // register and there is definitely a branch following. So no need
        // to invert the condition.
        if (lastBranch.isInitialized())
            lastBranch.emit(*this);
        lastBranch = BranchGCPtr();

        // Note: Some platforms give the same register for obj and scratch.
        // Make sure when writing to scratch, the obj register isn't used anymore!
        loadPtr(Address(obj, JSObject::offsetOfType()), scratch);

        for (unsigned i = 0; i < count; i++) {
            if (!types->getTypeObject(i))
                continue;

            if (lastBranch.isInitialized())
                lastBranch.emit(*this);

            types::TypeObject *object = types->getTypeObject(i);
            lastBranch = BranchGCPtr(Equal, scratch, ImmGCPtr(object), &matched);
        }
    }

    // No pending branch means the set contained nothing testable: always miss.
    if (!lastBranch.isInitialized()) {
        jump(miss);
        return;
    }

    lastBranch.invertCondition();
    lastBranch.relink(miss);
    lastBranch.emit(*this);

    bind(&matched);
    return;
}
   201 template <typename Source> void
   202 MacroAssembler::guardType(const Source &address, types::Type type,
   203                           Register scratch, Label *miss)
   204 {
   205     TypeWrapper wrapper(type);
   206     guardTypeSet(address, &wrapper, scratch, miss);
   207 }
// Explicit template instantiations: guardTypeSet, guardObjectType and
// guardType must exist for every (value-location, type-set kind) pair
// used by callers of these guards.
template void MacroAssembler::guardTypeSet(const Address &address, const types::TemporaryTypeSet *types,
                                           Register scratch, Label *miss);
template void MacroAssembler::guardTypeSet(const ValueOperand &value, const types::TemporaryTypeSet *types,
                                           Register scratch, Label *miss);

template void MacroAssembler::guardTypeSet(const Address &address, const types::HeapTypeSet *types,
                                           Register scratch, Label *miss);
template void MacroAssembler::guardTypeSet(const ValueOperand &value, const types::HeapTypeSet *types,
                                           Register scratch, Label *miss);
template void MacroAssembler::guardTypeSet(const TypedOrValueRegister &reg, const types::HeapTypeSet *types,
                                           Register scratch, Label *miss);

template void MacroAssembler::guardTypeSet(const Address &address, const types::TypeSet *types,
                                           Register scratch, Label *miss);
template void MacroAssembler::guardTypeSet(const ValueOperand &value, const types::TypeSet *types,
                                           Register scratch, Label *miss);

template void MacroAssembler::guardTypeSet(const Address &address, const TypeWrapper *types,
                                           Register scratch, Label *miss);
template void MacroAssembler::guardTypeSet(const ValueOperand &value, const TypeWrapper *types,
                                           Register scratch, Label *miss);

template void MacroAssembler::guardObjectType(Register obj, const types::TemporaryTypeSet *types,
                                              Register scratch, Label *miss);
template void MacroAssembler::guardObjectType(Register obj, const types::TypeSet *types,
                                              Register scratch, Label *miss);
template void MacroAssembler::guardObjectType(Register obj, const TypeWrapper *types,
                                              Register scratch, Label *miss);

template void MacroAssembler::guardType(const Address &address, types::Type type,
                                        Register scratch, Label *miss);
template void MacroAssembler::guardType(const ValueOperand &value, types::Type type,
                                        Register scratch, Label *miss);
// Compare the pointer at |ptr1| against the (possibly nursery-allocated)
// immediate |ptr2|, jumping to |label| when |cond| holds.
void
MacroAssembler::branchNurseryPtr(Condition cond, const Address &ptr1, const ImmMaybeNurseryPtr &ptr2,
                                 Label *label)
{
#ifdef JSGC_GENERATIONAL
    // Record that this code embeds a pointer into the nursery; presumably
    // the GC consults this flag on minor collection (setter only, here).
    if (ptr2.value && gc::IsInsideNursery(GetIonContext()->cx->runtime(), (void *)ptr2.value))
        embedsNurseryPointers_ = true;
#endif
    branchPtr(cond, ptr1, ptr2, label);
}
// Move the (possibly nursery-allocated) immediate |ptr| into |reg|.
void
MacroAssembler::moveNurseryPtr(const ImmMaybeNurseryPtr &ptr, Register reg)
{
#ifdef JSGC_GENERATIONAL
    // Same bookkeeping as branchNurseryPtr: flag that a nursery pointer is
    // baked into the generated code.
    if (ptr.value && gc::IsInsideNursery(GetIonContext()->cx->runtime(), (void *)ptr.value))
        embedsNurseryPointers_ = true;
#endif
    movePtr(ptr, reg);
}
// Emit a store of the float value |value| into the float32/float64
// typed-array element at |dest|, narrowing to float32 when required.
// |arrayType| must be one of the two float scalar type constants.
template<typename S, typename T>
static void
StoreToTypedFloatArray(MacroAssembler &masm, int arrayType, const S &value, const T &dest)
{
    switch (arrayType) {
      case ScalarTypeDescr::TYPE_FLOAT32:
        if (LIRGenerator::allowFloat32Optimizations()) {
            masm.storeFloat32(value, dest);
        } else {
            // No float32 fast path: narrow the double through the scratch
            // float register before storing.
#ifdef JS_MORE_DETERMINISTIC
            // See the comment in TypedArrayObjectTemplate::doubleToNative.
            masm.canonicalizeDouble(value);
#endif
            masm.convertDoubleToFloat32(value, ScratchFloatReg);
            masm.storeFloat32(ScratchFloatReg, dest);
        }
        break;
      case ScalarTypeDescr::TYPE_FLOAT64:
#ifdef JS_MORE_DETERMINISTIC
        // See the comment in TypedArrayObjectTemplate::doubleToNative.
        masm.canonicalizeDouble(value);
#endif
        masm.storeDouble(value, dest);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Invalid typed array type");
    }
}
// Public entry point for BaseIndex destinations; forwards to the template
// helper StoreToTypedFloatArray above.
void
MacroAssembler::storeToTypedFloatArray(int arrayType, const FloatRegister &value,
                                       const BaseIndex &dest)
{
    StoreToTypedFloatArray(*this, arrayType, value, dest);
}
// Public entry point for Address destinations; forwards to the template
// helper StoreToTypedFloatArray above.
void
MacroAssembler::storeToTypedFloatArray(int arrayType, const FloatRegister &value,
                                       const Address &dest)
{
    StoreToTypedFloatArray(*this, arrayType, value, dest);
}
// Emit a load of the element at |src| from a typed array of kind
// |arrayType| into |dest|: integer kinds use dest.gpr(), float kinds use
// dest.fpu() (canonicalized so no impure NaN bit pattern escapes).
// |temp| is only needed for UINT32 loads into a float register. Jumps to
// |fail| when a UINT32 element does not fit in a signed int32 and |dest|
// is an integer register (see comment below).
template<typename T>
void
MacroAssembler::loadFromTypedArray(int arrayType, const T &src, AnyRegister dest, Register temp,
                                   Label *fail)
{
    switch (arrayType) {
      case ScalarTypeDescr::TYPE_INT8:
        load8SignExtend(src, dest.gpr());
        break;
      case ScalarTypeDescr::TYPE_UINT8:
      case ScalarTypeDescr::TYPE_UINT8_CLAMPED:
        load8ZeroExtend(src, dest.gpr());
        break;
      case ScalarTypeDescr::TYPE_INT16:
        load16SignExtend(src, dest.gpr());
        break;
      case ScalarTypeDescr::TYPE_UINT16:
        load16ZeroExtend(src, dest.gpr());
        break;
      case ScalarTypeDescr::TYPE_INT32:
        load32(src, dest.gpr());
        break;
      case ScalarTypeDescr::TYPE_UINT32:
        if (dest.isFloat()) {
            // A double can represent every uint32 exactly; no failure path.
            load32(src, temp);
            convertUInt32ToDouble(temp, dest.fpu());
        } else {
            load32(src, dest.gpr());

            // Bail out if the value doesn't fit into a signed int32 value. This
            // is what allows MLoadTypedArrayElement to have a type() of
            // MIRType_Int32 for UInt32 array loads.
            branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
        }
        break;
      case ScalarTypeDescr::TYPE_FLOAT32:
        if (LIRGenerator::allowFloat32Optimizations()) {
            loadFloat32(src, dest.fpu());
            canonicalizeFloat(dest.fpu());
        } else {
            loadFloatAsDouble(src, dest.fpu());
            canonicalizeDouble(dest.fpu());
        }
        break;
      case ScalarTypeDescr::TYPE_FLOAT64:
        loadDouble(src, dest.fpu());
        canonicalizeDouble(dest.fpu());
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Invalid typed array type");
    }
}
// Explicit instantiations for the two supported element-address forms.
template void MacroAssembler::loadFromTypedArray(int arrayType, const Address &src, AnyRegister dest,
                                                 Register temp, Label *fail);
template void MacroAssembler::loadFromTypedArray(int arrayType, const BaseIndex &src, AnyRegister dest,
                                                 Register temp, Label *fail);
// Emit a load of the element at |src| from a typed array of kind
// |arrayType|, boxed into the Value register(s) |dest|. When |allowDouble|
// is true, a UINT32 element that exceeds int32 range is boxed as a double;
// otherwise that case jumps to |fail|. |temp| is a scratch GPR used only
// for the UINT32 case so |dest| is not clobbered before a possible bailout.
template<typename T>
void
MacroAssembler::loadFromTypedArray(int arrayType, const T &src, const ValueOperand &dest,
                                   bool allowDouble, Register temp, Label *fail)
{
    switch (arrayType) {
      case ScalarTypeDescr::TYPE_INT8:
      case ScalarTypeDescr::TYPE_UINT8:
      case ScalarTypeDescr::TYPE_UINT8_CLAMPED:
      case ScalarTypeDescr::TYPE_INT16:
      case ScalarTypeDescr::TYPE_UINT16:
      case ScalarTypeDescr::TYPE_INT32:
        // These kinds always fit in int32, so the AnyRegister overload
        // cannot fail (nullptr fail label) and the result is tagged int32.
        loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()), InvalidReg, nullptr);
        tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
        break;
      case ScalarTypeDescr::TYPE_UINT32:
        // Don't clobber dest when we could fail, instead use temp.
        load32(src, temp);
        if (allowDouble) {
            // If the value fits in an int32, store an int32 type tag.
            // Else, convert the value to double and box it.
            Label done, isDouble;
            branchTest32(Assembler::Signed, temp, temp, &isDouble);
            {
                tagValue(JSVAL_TYPE_INT32, temp, dest);
                jump(&done);
            }
            bind(&isDouble);
            {
                convertUInt32ToDouble(temp, ScratchFloatReg);
                boxDouble(ScratchFloatReg, dest);
            }
            bind(&done);
        } else {
            // Bailout if the value does not fit in an int32.
            branchTest32(Assembler::Signed, temp, temp, fail);
            tagValue(JSVAL_TYPE_INT32, temp, dest);
        }
        break;
      case ScalarTypeDescr::TYPE_FLOAT32:
        loadFromTypedArray(arrayType, src, AnyRegister(ScratchFloatReg), dest.scratchReg(),
                           nullptr);
        if (LIRGenerator::allowFloat32Optimizations())
            convertFloat32ToDouble(ScratchFloatReg, ScratchFloatReg);
        boxDouble(ScratchFloatReg, dest);
        break;
      case ScalarTypeDescr::TYPE_FLOAT64:
        loadFromTypedArray(arrayType, src, AnyRegister(ScratchFloatReg), dest.scratchReg(),
                           nullptr);
        boxDouble(ScratchFloatReg, dest);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Invalid typed array type");
    }
}
// Explicit instantiations for the two supported element-address forms.
template void MacroAssembler::loadFromTypedArray(int arrayType, const Address &src, const ValueOperand &dest,
                                                 bool allowDouble, Register temp, Label *fail);
template void MacroAssembler::loadFromTypedArray(int arrayType, const BaseIndex &src, const ValueOperand &dest,
                                                 bool allowDouble, Register temp, Label *fail);
// Emit an inline allocation of a GC cell of |allocKind|. On the fast path
// |result| receives the new cell (bump-allocated from the nursery or taken
// from the zone's free list); whenever the inline path cannot allocate,
// control jumps to |fail| so the caller can emit an out-of-line fallback.
// |temp| is clobbered.
void
MacroAssembler::newGCThing(Register result, Register temp, gc::AllocKind allocKind, Label *fail,
                           gc::InitialHeap initialHeap /* = gc::DefaultHeap */)
{
    // Inlined equivalent of js::gc::NewGCThing() without failure case handling.

    int thingSize = int(gc::Arena::thingSize(allocKind));

#ifdef JS_GC_ZEAL
    // Don't execute the inline path if gcZeal is active.
    branch32(Assembler::NotEqual,
             AbsoluteAddress(GetIonContext()->runtime->addressOfGCZeal()), Imm32(0),
             fail);
#endif

    // Don't execute the inline path if the compartment has an object metadata callback,
    // as the metadata to use for the object may vary between executions of the op.
    if (GetIonContext()->compartment->hasObjectMetadataCallback())
        jump(fail);

#ifdef JSGC_GENERATIONAL
    // Always use nursery allocation if it is possible to do so. The jit
    // assumes a nursery pointer is returned to avoid barriers.
    if (allocKind <= gc::FINALIZE_OBJECT_LAST && initialHeap != gc::TenuredHeap) {
        // Inline Nursery::allocate. No explicit check for nursery.isEnabled()
        // is needed, as the comparison with the nursery's end will always fail
        // in such cases.
        const Nursery &nursery = GetIonContext()->runtime->gcNursery();
        loadPtr(AbsoluteAddress(nursery.addressOfPosition()), result);
        computeEffectiveAddress(Address(result, thingSize), temp);
        branchPtr(Assembler::BelowOrEqual, AbsoluteAddress(nursery.addressOfCurrentEnd()), temp, fail);
        storePtr(temp, AbsoluteAddress(nursery.addressOfPosition()));
        return;
    }
#endif // JSGC_GENERATIONAL

    CompileZone *zone = GetIonContext()->compartment->zone();

    // Inline FreeSpan::allocate.
    // There is always exactly one FreeSpan per allocKind per JSCompartment.
    // If a FreeSpan is replaced, its members are updated in the freeLists table,
    // which the code below always re-reads.
    loadPtr(AbsoluteAddress(zone->addressOfFreeListFirst(allocKind)), result);
    branchPtr(Assembler::BelowOrEqual, AbsoluteAddress(zone->addressOfFreeListLast(allocKind)), result, fail);
    computeEffectiveAddress(Address(result, thingSize), temp);
    storePtr(temp, AbsoluteAddress(zone->addressOfFreeListFirst(allocKind)));
}
   473 void
   474 MacroAssembler::newGCThing(Register result, Register temp, JSObject *templateObject, Label *fail,
   475                            gc::InitialHeap initialHeap)
   476 {
   477     gc::AllocKind allocKind = templateObject->tenuredGetAllocKind();
   478     JS_ASSERT(allocKind >= gc::FINALIZE_OBJECT0 && allocKind <= gc::FINALIZE_OBJECT_LAST);
   480     newGCThing(result, temp, allocKind, fail, initialHeap);
   481 }
// Emit an inline allocation of a JSString cell; see newGCThing for the
// fail-label protocol.
void
MacroAssembler::newGCString(Register result, Register temp, Label *fail)
{
    newGCThing(result, temp, js::gc::FINALIZE_STRING, fail);
}
// Emit an inline allocation of a fat inline string cell; see newGCThing
// for the fail-label protocol.
void
MacroAssembler::newGCFatInlineString(Register result, Register temp, Label *fail)
{
    newGCThing(result, temp, js::gc::FINALIZE_FAT_INLINE_STRING, fail);
}
// Emit an inline parallel-execution allocation of a GC cell of |allocKind|
// from the per-context allocator reachable through |cx| (a ForkJoinContext
// register). |result| receives the cell; jumps to |fail| when the free
// list is exhausted. Both temp registers are clobbered.
void
MacroAssembler::newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                              gc::AllocKind allocKind, Label *fail)
{
    // Similar to ::newGCThing(), except that it allocates from a custom
    // Allocator in the ForkJoinContext*, rather than being hardcoded to the
    // compartment allocator.  This requires two temporary registers.
    //
    // Subtle: I wanted to reuse `result` for one of the temporaries, but the
    // register allocator was assigning it to the same register as `cx`.
    // Then we overwrite that register which messed up the OOL code.

    uint32_t thingSize = (uint32_t)gc::Arena::thingSize(allocKind);

    // Load the allocator:
    // tempReg1 = (Allocator*) forkJoinCx->allocator()
    loadPtr(Address(cx, ThreadSafeContext::offsetOfAllocator()),
            tempReg1);

    // Get a pointer to the relevant free list:
    // tempReg1 = (FreeSpan*) &tempReg1->arenas.freeLists[(allocKind)]
    uint32_t offset = (offsetof(Allocator, arenas) +
                       js::gc::ArenaLists::getFreeListOffset(allocKind));
    addPtr(Imm32(offset), tempReg1);

    // Load first item on the list
    // tempReg2 = tempReg1->first
    loadPtr(Address(tempReg1, offsetof(gc::FreeSpan, first)), tempReg2);

    // Check whether list is empty
    // if tempReg1->last <= tempReg2, fail
    branchPtr(Assembler::BelowOrEqual,
              Address(tempReg1, offsetof(gc::FreeSpan, last)),
              tempReg2,
              fail);

    // If not, take first and advance pointer by thingSize bytes.
    // result = tempReg2;
    // tempReg2 += thingSize;
    movePtr(tempReg2, result);
    addPtr(Imm32(thingSize), tempReg2);

    // Update `first`
    // tempReg1->first = tempReg2;
    storePtr(tempReg2, Address(tempReg1, offsetof(gc::FreeSpan, first)));
}
   542 void
   543 MacroAssembler::newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
   544                               JSObject *templateObject, Label *fail)
   545 {
   546     gc::AllocKind allocKind = templateObject->tenuredGetAllocKind();
   547     JS_ASSERT(allocKind >= gc::FINALIZE_OBJECT0 && allocKind <= gc::FINALIZE_OBJECT_LAST);
   549     newGCThingPar(result, cx, tempReg1, tempReg2, allocKind, fail);
   550 }
// Parallel-execution counterpart of newGCString; see newGCThingPar.
void
MacroAssembler::newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
                               Label *fail)
{
    newGCThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_STRING, fail);
}
// Parallel-execution counterpart of newGCFatInlineString; see newGCThingPar.
void
MacroAssembler::newGCFatInlineStringPar(Register result, Register cx, Register tempReg1,
                                        Register tempReg2, Label *fail)
{
    newGCThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_FAT_INLINE_STRING, fail);
}
   566 void
   567 MacroAssembler::copySlotsFromTemplate(Register obj, Register temp, const JSObject *templateObj,
   568                                       uint32_t start, uint32_t end)
   569 {
   570     uint32_t nfixed = Min(templateObj->numFixedSlots(), end);
   571     for (unsigned i = start; i < nfixed; i++)
   572         storeValue(templateObj->getFixedSlot(i), Address(obj, JSObject::getFixedSlotOffset(i)));
   573 }
// Emit stores of UndefinedValue into the fixed slots
// [start, min(end, numFixedSlots)) of |obj|. |temp| is clobbered.
void
MacroAssembler::fillSlotsWithUndefined(Register obj, Register temp, const JSObject *templateObj,
                                       uint32_t start, uint32_t end)
{
#ifdef JS_NUNBOX32
    // We only have a single spare register, so do the initialization as two
    // strided writes of the tag and body.
    jsval_layout jv = JSVAL_TO_IMPL(UndefinedValue());
    uint32_t nfixed = Min(templateObj->numFixedSlots(), end);

    mov(ImmWord(jv.s.tag), temp);
    for (unsigned i = start; i < nfixed; i++)
        store32(temp, ToType(Address(obj, JSObject::getFixedSlotOffset(i))));

    mov(ImmWord(jv.s.payload.i32), temp);
    for (unsigned i = start; i < nfixed; i++)
        store32(temp, ToPayload(Address(obj, JSObject::getFixedSlotOffset(i))));
#else
    // Boxed values are pointer-sized here, so one store per slot suffices.
    moveValue(UndefinedValue(), temp);
    uint32_t nfixed = Min(templateObj->numFixedSlots(), end);
    for (unsigned i = start; i < nfixed; i++)
        storePtr(temp, Address(obj, JSObject::getFixedSlotOffset(i)));
#endif
}
   600 static uint32_t
   601 FindStartOfUndefinedSlots(JSObject *templateObj, uint32_t nslots)
   602 {
   603     JS_ASSERT(nslots == templateObj->lastProperty()->slotSpan(templateObj->getClass()));
   604     JS_ASSERT(nslots > 0);
   605     for (uint32_t first = nslots; first != 0; --first) {
   606         if (templateObj->getSlot(first - 1) != UndefinedValue())
   607             return first;
   608     }
   609     return 0;
   610 }
   612 void
   613 MacroAssembler::initGCSlots(Register obj, Register temp, JSObject *templateObj)
   614 {
   615     // Slots of non-array objects are required to be initialized.
   616     // Use the values currently in the template object.
   617     uint32_t nslots = templateObj->lastProperty()->slotSpan(templateObj->getClass());
   618     if (nslots == 0)
   619         return;
   621     // Attempt to group slot writes such that we minimize the amount of
   622     // duplicated data we need to embed in code and load into registers. In
   623     // general, most template object slots will be undefined except for any
   624     // reserved slots. Since reserved slots come first, we split the object
   625     // logically into independent non-UndefinedValue writes to the head and
   626     // duplicated writes of UndefinedValue to the tail. For the majority of
   627     // objects, the "tail" will be the entire slot range.
   628     uint32_t startOfUndefined = FindStartOfUndefinedSlots(templateObj, nslots);
   629     copySlotsFromTemplate(obj, temp, templateObj, 0, startOfUndefined);
   630     fillSlotsWithUndefined(obj, temp, templateObj, startOfUndefined, nslots);
   631 }
// Emit full initialization of a freshly allocated object cell |obj| from
// |templateObj|: shape, type, slots pointer, elements (with a complete
// elements header for arrays) and private data. |temp| is clobbered.
void
MacroAssembler::initGCThing(Register obj, Register temp, JSObject *templateObj)
{
    // Fast initialization of an empty object returned by NewGCThing().

    JS_ASSERT(!templateObj->hasDynamicElements());

    storePtr(ImmGCPtr(templateObj->lastProperty()), Address(obj, JSObject::offsetOfShape()));
    storePtr(ImmGCPtr(templateObj->type()), Address(obj, JSObject::offsetOfType()));
    storePtr(ImmPtr(nullptr), Address(obj, JSObject::offsetOfSlots()));

    if (templateObj->is<ArrayObject>()) {
        JS_ASSERT(!templateObj->getDenseInitializedLength());

        int elementsOffset = JSObject::offsetOfFixedElements();

        // Arrays use their inline (fixed) element storage.
        computeEffectiveAddress(Address(obj, elementsOffset), temp);
        storePtr(temp, Address(obj, JSObject::offsetOfElements()));

        // Fill in the elements header.
        store32(Imm32(templateObj->getDenseCapacity()),
                Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
        store32(Imm32(templateObj->getDenseInitializedLength()),
                Address(obj, elementsOffset + ObjectElements::offsetOfInitializedLength()));
        store32(Imm32(templateObj->as<ArrayObject>().length()),
                Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
        store32(Imm32(templateObj->shouldConvertDoubleElements()
                      ? ObjectElements::CONVERT_DOUBLE_ELEMENTS
                      : 0),
                Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
        JS_ASSERT(!templateObj->hasPrivate());
    } else {
        // Non-arrays share the immutable empty elements header.
        storePtr(ImmPtr(emptyObjectElements), Address(obj, JSObject::offsetOfElements()));

        initGCSlots(obj, temp, templateObj);

        if (templateObj->hasPrivate()) {
            uint32_t nfixed = templateObj->numFixedSlots();
            storePtr(ImmPtr(templateObj->getPrivate()),
                     Address(obj, JSObject::getPrivateDataOffset(nfixed)));
        }
    }
}
// Emit an inline string (in)equality comparison. |op| must be an equality
// op. Sets |result| to the boolean outcome for cases decidable inline:
// pointer-identical strings, two atoms (pointer compare suffices), and
// strings of differing length. Same-length non-atom strings jump to |fail|
// for an out-of-line character comparison. |temp| is clobbered.
void
MacroAssembler::compareStrings(JSOp op, Register left, Register right, Register result,
                               Register temp, Label *fail)
{
    JS_ASSERT(IsEqualityOp(op));

    Label done;
    Label notPointerEqual;
    // Fast path for identical strings.
    branchPtr(Assembler::NotEqual, left, right, &notPointerEqual);
    move32(Imm32(op == JSOP_EQ || op == JSOP_STRICTEQ), result);
    jump(&done);

    bind(&notPointerEqual);
    loadPtr(Address(left, JSString::offsetOfLengthAndFlags()), result);
    loadPtr(Address(right, JSString::offsetOfLengthAndFlags()), temp);

    Label notAtom;
    // Optimize the equality operation to a pointer compare for two atoms.
    Imm32 atomBit(JSString::ATOM_BIT);
    branchTest32(Assembler::Zero, result, atomBit, &notAtom);
    branchTest32(Assembler::Zero, temp, atomBit, &notAtom);

    cmpPtrSet(JSOpToCondition(MCompare::Compare_String, op), left, right, result);
    jump(&done);

    bind(&notAtom);
    // Strings of different length can never be equal.
    rshiftPtr(Imm32(JSString::LENGTH_SHIFT), result);
    rshiftPtr(Imm32(JSString::LENGTH_SHIFT), temp);
    branchPtr(Assembler::Equal, result, temp, fail);
    move32(Imm32(op == JSOP_NE || op == JSOP_STRICTNE), result);

    bind(&done);
}
// Emit a check of the runtime's parallel-interrupt flag, jumping to |fail|
// when it is set. |tempReg| is clobbered to hold the flag's address.
void
MacroAssembler::checkInterruptFlagPar(Register tempReg, Label *fail)
{
#ifdef JS_THREADSAFE
    movePtr(ImmPtr(GetIonContext()->runtime->addressOfInterruptPar()), tempReg);
    branch32(Assembler::NonZero, Address(tempReg, 0), Imm32(0), fail);
#else
    MOZ_ASSUME_UNREACHABLE("JSRuntime::interruptPar doesn't exist on non-threadsafe builds.");
#endif
}
// C-ABI trampoline invoked from generateBailoutTail via callWithABI to
// report an over-recursion error on the current JSContext.
static void
ReportOverRecursed(JSContext *cx)
{
    js_ReportOverRecursed(cx);
}
// Emit the common tail of a bailout: dispatch on the status code left in
// ReturnReg by the Bailout call and either (a) report over-recursion and
// jump to the exception handler, (b) jump straight to the exception handler,
// or (c) rebuild the Baseline frame described by |bailoutInfo| (a
// BaselineBailoutInfo *) and resume execution in Baseline code. |scratch| is
// a free temporary used for ABI-call setup.
void
MacroAssembler::generateBailoutTail(Register scratch, Register bailoutInfo)
{
    enterExitFrame();

    Label baseline;

    // The return value from Bailout is tagged as:
    // - 0x0: done (enter baseline)
    // - 0x1: error (handle exception)
    // - 0x2: overrecursed
    JS_STATIC_ASSERT(BAILOUT_RETURN_OK == 0);
    JS_STATIC_ASSERT(BAILOUT_RETURN_FATAL_ERROR == 1);
    JS_STATIC_ASSERT(BAILOUT_RETURN_OVERRECURSED == 2);

    branch32(Equal, ReturnReg, Imm32(BAILOUT_RETURN_OK), &baseline);
    branch32(Equal, ReturnReg, Imm32(BAILOUT_RETURN_FATAL_ERROR), exceptionLabel());

    // Fall-through: overrecursed.
    {
        loadJSContext(ReturnReg);
        setupUnalignedABICall(1, scratch);
        passABIArg(ReturnReg);
        callWithABI(JS_FUNC_TO_DATA_PTR(void *, ReportOverRecursed));
        jump(exceptionLabel());
    }

    bind(&baseline);
    {
        // Prepare a register set for use in this case.
        GeneralRegisterSet regs(GeneralRegisterSet::All());
        JS_ASSERT(!regs.has(BaselineStackReg));
        regs.take(bailoutInfo);

        // Reset SP to the point where clobbering starts.
        loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)),
                BaselineStackReg);

        Register copyCur = regs.takeAny();
        Register copyEnd = regs.takeAny();
        Register temp = regs.takeAny();

        // Copy data onto stack. The copy proceeds downward from copyStackTop
        // to copyStackBottom in 4-byte units.
        loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)), copyCur);
        loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)), copyEnd);
        {
            Label copyLoop;
            Label endOfCopy;
            bind(&copyLoop);
            branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
            subPtr(Imm32(4), copyCur);
            subPtr(Imm32(4), BaselineStackReg);
            load32(Address(copyCur, 0), temp);
            store32(temp, Address(BaselineStackReg, 0));
            jump(&copyLoop);
            bind(&endOfCopy);
        }

        // Enter exit frame for the FinishBailoutToBaseline call.
        loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)), temp);
        load32(Address(temp, BaselineFrame::reverseOffsetOfFrameSize()), temp);
        makeFrameDescriptor(temp, JitFrame_BaselineJS);
        push(temp);
        push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
        enterFakeExitFrame();

        // If monitorStub is non-null, handle resumeAddr appropriately.
        Label noMonitor;
        Label done;
        branchPtr(Assembler::Equal,
                  Address(bailoutInfo, offsetof(BaselineBailoutInfo, monitorStub)),
                  ImmPtr(nullptr),
                  &noMonitor);

        //
        // Resuming into a monitoring stub chain.
        //
        {
            // Save needed values onto stack temporarily.
            pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR0)));
            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, monitorStub)));

            // Call a stub to free allocated memory and create arguments objects.
            setupUnalignedABICall(1, temp);
            passABIArg(bailoutInfo);
            callWithABI(JS_FUNC_TO_DATA_PTR(void *, FinishBailoutToBaseline));
            // A zero return means failure; propagate to the exception handler.
            branchTest32(Zero, ReturnReg, ReturnReg, exceptionLabel());

            // Restore values where they need to be and resume execution.
            GeneralRegisterSet enterMonRegs(GeneralRegisterSet::All());
            enterMonRegs.take(R0);
            enterMonRegs.take(BaselineStubReg);
            enterMonRegs.take(BaselineFrameReg);
            enterMonRegs.takeUnchecked(BaselineTailCallReg);

            // Pops must mirror the pushes above (monitorStub was pushed last).
            pop(BaselineStubReg);
            pop(BaselineTailCallReg);
            pop(BaselineFrameReg);
            popValue(R0);

            // Discard exit frame.
            addPtr(Imm32(IonExitFrameLayout::SizeWithFooter()), StackPointer);

            // On x86/x64 the stub expects its return address on the stack.
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
            push(BaselineTailCallReg);
#endif
            jump(Address(BaselineStubReg, ICStub::offsetOfStubCode()));
        }

        //
        // Resuming into main jitcode.
        //
        bind(&noMonitor);
        {
            // Save needed values onto stack temporarily.
            pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR0)));
            pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR1)));
            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));

            // Call a stub to free allocated memory and create arguments objects.
            setupUnalignedABICall(1, temp);
            passABIArg(bailoutInfo);
            callWithABI(JS_FUNC_TO_DATA_PTR(void *, FinishBailoutToBaseline));
            branchTest32(Zero, ReturnReg, ReturnReg, exceptionLabel());

            // Restore values where they need to be and resume execution.
            GeneralRegisterSet enterRegs(GeneralRegisterSet::All());
            enterRegs.take(R0);
            enterRegs.take(R1);
            enterRegs.take(BaselineFrameReg);
            Register jitcodeReg = enterRegs.takeAny();

            pop(jitcodeReg);
            pop(BaselineFrameReg);
            popValue(R1);
            popValue(R0);

            // Discard exit frame.
            addPtr(Imm32(IonExitFrameLayout::SizeWithFooter()), StackPointer);

            jump(jitcodeReg);
        }
    }
}
   878 void
   879 MacroAssembler::loadBaselineOrIonRaw(Register script, Register dest, ExecutionMode mode,
   880                                      Label *failure)
   881 {
   882     if (mode == SequentialExecution) {
   883         loadPtr(Address(script, JSScript::offsetOfBaselineOrIonRaw()), dest);
   884         if (failure)
   885             branchTestPtr(Assembler::Zero, dest, dest, failure);
   886     } else {
   887         loadPtr(Address(script, JSScript::offsetOfParallelIonScript()), dest);
   888         if (failure)
   889             branchPtr(Assembler::BelowOrEqual, dest, ImmPtr(ION_COMPILING_SCRIPT), failure);
   890         loadPtr(Address(dest, IonScript::offsetOfMethod()), dest);
   891         loadPtr(Address(dest, JitCode::offsetOfCode()), dest);
   892     }
   893 }
   895 void
   896 MacroAssembler::loadBaselineOrIonNoArgCheck(Register script, Register dest, ExecutionMode mode,
   897                                             Label *failure)
   898 {
   899     if (mode == SequentialExecution) {
   900         loadPtr(Address(script, JSScript::offsetOfBaselineOrIonSkipArgCheck()), dest);
   901         if (failure)
   902             branchTestPtr(Assembler::Zero, dest, dest, failure);
   903     } else {
   904         // Find second register to get the offset to skip argument check
   905         Register offset = script;
   906         if (script == dest) {
   907             GeneralRegisterSet regs(GeneralRegisterSet::All());
   908             regs.take(dest);
   909             offset = regs.takeAny();
   910         }
   912         loadPtr(Address(script, JSScript::offsetOfParallelIonScript()), dest);
   913         if (failure)
   914             branchPtr(Assembler::BelowOrEqual, dest, ImmPtr(ION_COMPILING_SCRIPT), failure);
   916         Push(offset);
   917         load32(Address(script, IonScript::offsetOfSkipArgCheckEntryOffset()), offset);
   919         loadPtr(Address(dest, IonScript::offsetOfMethod()), dest);
   920         loadPtr(Address(dest, JitCode::offsetOfCode()), dest);
   921         addPtr(offset, dest);
   923         Pop(offset);
   924     }
   925 }
   927 void
   928 MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest)
   929 {
   930     if (framePtr != dest)
   931         movePtr(framePtr, dest);
   932     subPtr(Imm32(BaselineFrame::Size()), dest);
   933 }
// Load the current ForkJoinContext * into |cx| by calling out to
// ForkJoinContextPar through the C ABI; |scratch| is only used to set up
// the unaligned ABI call.
void
MacroAssembler::loadForkJoinContext(Register cx, Register scratch)
{
    // Load the current ForkJoinContext *. If we need a parallel exit frame,
    // chances are we are about to do something very slow anyways, so just
    // call ForkJoinContextPar again instead of using the cached version.
    setupUnalignedABICall(0, scratch);
    callWithABI(JS_FUNC_TO_DATA_PTR(void *, ForkJoinContextPar));
    if (ReturnReg != cx)
        movePtr(ReturnReg, cx);
}
   947 void
   948 MacroAssembler::loadContext(Register cxReg, Register scratch, ExecutionMode executionMode)
   949 {
   950     switch (executionMode) {
   951       case SequentialExecution:
   952         // The scratch register is not used for sequential execution.
   953         loadJSContext(cxReg);
   954         break;
   955       case ParallelExecution:
   956         loadForkJoinContext(cxReg, scratch);
   957         break;
   958       default:
   959         MOZ_ASSUME_UNREACHABLE("No such execution mode");
   960     }
   961 }
// Enter a parallel (ForkJoin) exit frame for VMFunction |f| and leave the
// ForkJoinContext * in |cx|. |scratch| ends up holding the PerThreadData *.
void
MacroAssembler::enterParallelExitFrameAndLoadContext(const VMFunction *f, Register cx,
                                                     Register scratch)
{
    loadForkJoinContext(cx, scratch);
    // Load the PerThreadData from the cx.
    loadPtr(Address(cx, offsetof(ForkJoinContext, perThreadData)), scratch);
    linkParallelExitFrame(scratch);
    // Push the ioncode. The -1 placeholder is recorded in exitCodePatch_ to
    // be patched with the real code pointer later.
    exitCodePatch_ = PushWithPatch(ImmWord(-1));
    // Push the VMFunction pointer, to mark arguments.
    Push(ImmPtr(f));
}
// Enter a fake parallel exit frame tagged with |codeVal| instead of a real
// return address; a null footer word is pushed in place of a VMFunction *.
void
MacroAssembler::enterFakeParallelExitFrame(Register cx, Register scratch,
                                           JitCode *codeVal)
{
    // Load the PerThreadData from the cx.
    loadPtr(Address(cx, offsetof(ForkJoinContext, perThreadData)), scratch);
    linkParallelExitFrame(scratch);
    Push(ImmPtr(codeVal));
    Push(ImmPtr(nullptr));
}
// Mode-dispatching wrapper: enter an exit frame for VMFunction |f| and load
// the matching context (JSContext or ForkJoinContext) into |cxReg|.
// |scratch| is only used on the parallel path.
void
MacroAssembler::enterExitFrameAndLoadContext(const VMFunction *f, Register cxReg, Register scratch,
                                             ExecutionMode executionMode)
{
    switch (executionMode) {
      case SequentialExecution:
        // The scratch register is not used for sequential execution.
        enterExitFrame(f);
        loadJSContext(cxReg);
        break;
      case ParallelExecution:
        enterParallelExitFrameAndLoadContext(f, cxReg, scratch);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
// Mode-dispatching wrapper for entering a fake exit frame tagged with
// |codeVal|; only the parallel path uses |cxReg| and |scratch|.
void
MacroAssembler::enterFakeExitFrame(Register cxReg, Register scratch,
                                   ExecutionMode executionMode,
                                   JitCode *codeVal)
    switch (executionMode) {
      case SequentialExecution:
        // The cx and scratch registers are not used for sequential execution.
        enterFakeExitFrame(codeVal);
        break;
      case ParallelExecution:
        enterFakeParallelExitFrame(cxReg, scratch, codeVal);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
// Emit the failure path: unwind SPS instrumentation state, then transfer
// control to the exception handler appropriate for |executionMode| via
// handleFailureWithHandler.
void
MacroAssembler::handleFailure(ExecutionMode executionMode)
    // Re-entry code is irrelevant because the exception will leave the
    // running function and never come back
    if (sps_)
        sps_->skipNextReenter();
    leaveSPSFrame();

    void *handler;
    switch (executionMode) {
      case SequentialExecution:
        handler = JS_FUNC_TO_DATA_PTR(void *, jit::HandleException);
        break;
      case ParallelExecution:
        handler = JS_FUNC_TO_DATA_PTR(void *, jit::HandleParallelFailure);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("No such execution mode");
    MacroAssemblerSpecific::handleFailureWithHandler(handler);

    // Doesn't actually emit code, but balances the leave()
    if (sps_)
        sps_->reenter(*this, InvalidReg);
  1051 #ifdef DEBUG
// True when the active IonContext belongs to asm.js compilation (DEBUG only).
static inline bool
IsCompilingAsmJS()
    // asm.js compilation pushes an IonContext with a null JSCompartment.
    IonContext *ictx = MaybeGetIonContext();
    return ictx && ictx->compartment == nullptr;
// C-ABI target for MacroAssembler::assumeUnreachable (DEBUG only): reports
// the assertion failure message at runtime.
static void
AssumeUnreachable_(const char *output) {
    MOZ_ReportAssertionFailure(output, __FILE__, __LINE__);
  1064 #endif
// Emit code that reports |output| as an assertion failure (DEBUG builds)
// and then traps via breakpoint(). Volatile registers are saved/restored
// around the call so the surrounding code is unaffected.
void
MacroAssembler::assumeUnreachable(const char *output)
#ifdef DEBUG
    RegisterSet regs = RegisterSet::Volatile();
    PushRegsInMask(regs);
    Register temp = regs.takeGeneral();

    // With ASLR, we can't rely on 'output' to point to the
    // same char array after serialization/deserialization.
    // It is not possible until we modify AsmJsImmPtr and
    // the underlying "patching" mechanism.
    if (IsCompilingAsmJS()) {
        setupUnalignedABICall(0, temp);
        callWithABINoProfiling(AsmJSImm_AssumeUnreachable);
    } else {
        setupUnalignedABICall(1, temp);
        movePtr(ImmPtr(output), temp);
        passABIArg(temp);
        callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, AssumeUnreachable_));
    PopRegsInMask(RegisterSet::Volatile());
#endif

    breakpoint();
// C-ABI target for MacroAssembler::printf(const char *): prints the string.
static void
Printf0_(const char *output) {
    printf("%s", output);
// Emit a debug printf of the literal string |output|, preserving all
// volatile registers around the C call.
void
MacroAssembler::printf(const char *output)
    RegisterSet regs = RegisterSet::Volatile();
    PushRegsInMask(regs);

    Register temp = regs.takeGeneral();

    setupUnalignedABICall(1, temp);
    movePtr(ImmPtr(output), temp);
    passABIArg(temp);
    callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, Printf0_));

    PopRegsInMask(RegisterSet::Volatile());
// C-ABI target for MacroAssembler::printf(const char *, Register): formats
// |value| into |output| and prints the result.
// NOTE(review): JS_sprintf_append can return nullptr on OOM, which would feed
// nullptr to printf("%s", ...) — debug-only helper, but worth confirming.
static void
Printf1_(const char *output, uintptr_t value) {
    char *line = JS_sprintf_append(nullptr, output, value);
    printf("%s", line);
    js_free(line);
// Emit a debug printf of format string |output| with the runtime contents of
// |value| as its single argument; volatile registers are preserved.
void
MacroAssembler::printf(const char *output, Register value)
    RegisterSet regs = RegisterSet::Volatile();
    PushRegsInMask(regs);

    // Keep |value| out of the scratch pool so it is passed through intact.
    regs.takeUnchecked(value);

    Register temp = regs.takeGeneral();

    setupUnalignedABICall(2, temp);
    movePtr(ImmPtr(output), temp);
    passABIArg(temp);
    passABIArg(value);
    callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, Printf1_));

    PopRegsInMask(RegisterSet::Volatile());
  1140 #ifdef JS_TRACE_LOGGING
// Emit a TraceLogger start event with the compile-time constant |textId|,
// calling TraceLogStartEvent through the C ABI with volatiles preserved.
void
MacroAssembler::tracelogStart(Register logger, uint32_t textId)
    void (&TraceLogFunc)(TraceLogger*, uint32_t) = TraceLogStartEvent;

    PushRegsInMask(RegisterSet::Volatile());

    RegisterSet regs = RegisterSet::Volatile();
    regs.takeUnchecked(logger);

    Register temp = regs.takeGeneral();

    setupUnalignedABICall(2, temp);
    passABIArg(logger);
    move32(Imm32(textId), temp);
    passABIArg(temp);
    callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, TraceLogFunc));

    PopRegsInMask(RegisterSet::Volatile());
// Emit a TraceLogger start event whose text id is held in register |textId|.
void
MacroAssembler::tracelogStart(Register logger, Register textId)
    void (&TraceLogFunc)(TraceLogger*, uint32_t) = TraceLogStartEvent;

    PushRegsInMask(RegisterSet::Volatile());

    RegisterSet regs = RegisterSet::Volatile();
    regs.takeUnchecked(logger);
    regs.takeUnchecked(textId);

    Register temp = regs.takeGeneral();

    setupUnalignedABICall(2, temp);
    passABIArg(logger);
    passABIArg(textId);
    callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, TraceLogFunc));

    regs.add(temp);

    PopRegsInMask(RegisterSet::Volatile());
// Emit a TraceLogger stop event with the compile-time constant |textId|.
void
MacroAssembler::tracelogStop(Register logger, uint32_t textId)
    void (&TraceLogFunc)(TraceLogger*, uint32_t) = TraceLogStopEvent;

    PushRegsInMask(RegisterSet::Volatile());

    RegisterSet regs = RegisterSet::Volatile();
    regs.takeUnchecked(logger);

    Register temp = regs.takeGeneral();

    setupUnalignedABICall(2, temp);
    passABIArg(logger);
    move32(Imm32(textId), temp);
    passABIArg(temp);
    callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, TraceLogFunc));

    regs.add(temp);

    PopRegsInMask(RegisterSet::Volatile());
// Emit a TraceLogger stop event whose text id is held in register |textId|.
// In non-DEBUG builds the id is ignored and the generic one-argument stop
// event is emitted instead.
void
MacroAssembler::tracelogStop(Register logger, Register textId)
#ifdef DEBUG
    void (&TraceLogFunc)(TraceLogger*, uint32_t) = TraceLogStopEvent;

    PushRegsInMask(RegisterSet::Volatile());

    RegisterSet regs = RegisterSet::Volatile();
    regs.takeUnchecked(logger);
    regs.takeUnchecked(textId);

    Register temp = regs.takeGeneral();

    setupUnalignedABICall(2, temp);
    passABIArg(logger);
    passABIArg(textId);
    callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, TraceLogFunc));

    regs.add(temp);

    PopRegsInMask(RegisterSet::Volatile());
#else
    tracelogStop(logger);
#endif
// Emit a TraceLogger stop event for the most recent started event (the
// one-argument TraceLogStopEvent overload).
void
MacroAssembler::tracelogStop(Register logger)
    void (&TraceLogFunc)(TraceLogger*) = TraceLogStopEvent;

    PushRegsInMask(RegisterSet::Volatile());

    RegisterSet regs = RegisterSet::Volatile();
    regs.takeUnchecked(logger);

    Register temp = regs.takeGeneral();

    setupUnalignedABICall(1, temp);
    passABIArg(logger);
    callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, TraceLogFunc));

    regs.add(temp);

    PopRegsInMask(RegisterSet::Volatile());
  1255 #endif
// If the Value stored at |address| is a tagged int32, convert it in place
// to a boxed double (via |scratch| and ScratchFloatReg); otherwise jump to
// |done| leaving the Value untouched.
void
MacroAssembler::convertInt32ValueToDouble(const Address &address, Register scratch, Label *done)
    branchTestInt32(Assembler::NotEqual, address, done);
    unboxInt32(address, scratch);
    convertInt32ToDouble(scratch, ScratchFloatReg);
    storeDouble(ScratchFloatReg, address);
// Emit a conversion of the boxed Value in |value| to a double or float32
// (per |outputType|) in |output|. Handles double, int32, boolean, null and
// undefined inline; any other tag (object, string, ...) branches to |fail|.
void
MacroAssembler::convertValueToFloatingPoint(ValueOperand value, FloatRegister output,
                                            Label *fail, MIRType outputType)
    Register tag = splitTagForTest(value);

    Label isDouble, isInt32, isBool, isNull, done;
    branchTestDouble(Assembler::Equal, tag, &isDouble);
    branchTestInt32(Assembler::Equal, tag, &isInt32);
    branchTestBoolean(Assembler::Equal, tag, &isBool);
    branchTestNull(Assembler::Equal, tag, &isNull);
    branchTestUndefined(Assembler::NotEqual, tag, fail);

    // fall-through: undefined converts to NaN.
    loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
    jump(&done);

    bind(&isNull);
    // null converts to +0.
    loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
    jump(&done);

    bind(&isBool);
    boolValueToFloatingPoint(value, output, outputType);
    jump(&done);

    bind(&isInt32);
    int32ValueToFloatingPoint(value, output, outputType);
    jump(&done);

    bind(&isDouble);
    unboxDouble(value, output);
    if (outputType == MIRType_Float32)
        convertDoubleToFloat32(output, output);
    bind(&done);
// Compile-time counterpart of the ValueOperand overload: |v| is a constant
// Value, so the conversion result is computed here and emitted as a constant
// load. Returns false only on OOM from string-to-number conversion; objects
// emit a jump to |fail| (they cannot be converted at compile time).
bool
MacroAssembler::convertValueToFloatingPoint(JSContext *cx, const Value &v, FloatRegister output,
                                            Label *fail, MIRType outputType)
    if (v.isNumber() || v.isString()) {
        double d;
        if (v.isNumber())
            d = v.toNumber();
        else if (!StringToNumber(cx, v.toString(), &d))
            return false;

        loadConstantFloatingPoint(d, (float)d, output, outputType);
        return true;

    if (v.isBoolean()) {
        if (v.toBoolean())
            loadConstantFloatingPoint(1.0, 1.0f, output, outputType);
        else
            loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
        return true;

    if (v.isNull()) {
        loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
        return true;

    if (v.isUndefined()) {
        loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
        return true;

    JS_ASSERT(v.isObject());
    jump(fail);
    return true;
// Push a placeholder rooted slot matching |rootType|: nullptr for cell-like
// roots, UndefinedValue for value roots. Paired with popRooted below.
void
MacroAssembler::PushEmptyRooted(VMFunction::RootType rootType)
    switch (rootType) {
      case VMFunction::RootNone:
        MOZ_ASSUME_UNREACHABLE("Handle must have root type");
      case VMFunction::RootObject:
      case VMFunction::RootString:
      case VMFunction::RootPropertyName:
      case VMFunction::RootFunction:
      case VMFunction::RootCell:
        Push(ImmPtr(nullptr));
        break;
      case VMFunction::RootValue:
        Push(UndefinedValue());
        break;
// Pop the rooted slot pushed by PushEmptyRooted into the appropriate
// destination: |cellReg| for cell-like roots, |valueReg| for value roots.
void
MacroAssembler::popRooted(VMFunction::RootType rootType, Register cellReg,
                          const ValueOperand &valueReg)
    switch (rootType) {
      case VMFunction::RootNone:
        MOZ_ASSUME_UNREACHABLE("Handle must have root type");
      case VMFunction::RootObject:
      case VMFunction::RootString:
      case VMFunction::RootPropertyName:
      case VMFunction::RootFunction:
      case VMFunction::RootCell:
        Pop(cellReg);
        break;
      case VMFunction::RootValue:
        Pop(valueReg);
        break;
// Dispatch helper: convert |src| to a floating-point value in |output|,
// using the constant path when |src| is a known Value and the register path
// otherwise. Returns false only on OOM in the constant path.
bool
MacroAssembler::convertConstantOrRegisterToFloatingPoint(JSContext *cx, ConstantOrRegister src,
                                                         FloatRegister output, Label *fail,
                                                         MIRType outputType)
    if (src.constant())
        return convertValueToFloatingPoint(cx, src.value(), output, fail, outputType);

    convertTypedOrValueToFloatingPoint(src.reg(), output, fail, outputType);
    return true;
// Convert |src| — either a boxed Value or a typed register — to a double or
// float32 (per |outputType|) in |output|. Object and String inputs branch
// to |fail|; Magic and other types are compile-time errors.
void
MacroAssembler::convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
                                                   Label *fail, MIRType outputType)
    JS_ASSERT(IsFloatingPointType(outputType));

    if (src.hasValue()) {
        convertValueToFloatingPoint(src.valueReg(), output, fail, outputType);
        return;

    bool outputIsDouble = outputType == MIRType_Double;
    switch (src.type()) {
      case MIRType_Null:
        loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
        break;
      case MIRType_Boolean:
      case MIRType_Int32:
        convertInt32ToFloatingPoint(src.typedReg().gpr(), output, outputType);
        break;
      case MIRType_Float32:
        if (outputIsDouble) {
            convertFloat32ToDouble(src.typedReg().fpu(), output);
        } else {
            // Same width: just move if source and destination differ.
            if (src.typedReg().fpu() != output)
                moveFloat32(src.typedReg().fpu(), output);
        break;
      case MIRType_Double:
        if (outputIsDouble) {
            if (src.typedReg().fpu() != output)
                moveDouble(src.typedReg().fpu(), output);
        } else {
            convertDoubleToFloat32(src.typedReg().fpu(), output);
        break;
      case MIRType_Object:
      case MIRType_String:
        jump(fail);
        break;
      case MIRType_Undefined:
        loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Bad MIRType");
// Convert the double in |src| to an int32 in |output| under the requested
// |behavior|: exact conversion (optionally rejecting -0), truncation (with
// |truncateFail| as the slow path when provided, else |fail|), or clamping
// to the uint8 range. |temp| is only clobbered on the clamping path.
void
MacroAssembler::convertDoubleToInt(FloatRegister src, Register output, FloatRegister temp,
                                   Label *truncateFail, Label *fail,
                                   IntConversionBehavior behavior)
    switch (behavior) {
      case IntConversion_Normal:
      case IntConversion_NegativeZeroCheck:
        convertDoubleToInt32(src, output, fail, behavior == IntConversion_NegativeZeroCheck);
        break;
      case IntConversion_Truncate:
        branchTruncateDouble(src, output, truncateFail ? truncateFail : fail);
        break;
      case IntConversion_ClampToUint8:
        // Clamping clobbers the input register, so use a temp.
        moveDouble(src, temp);
        clampDoubleToUint8(temp, output);
        break;
// Emit a conversion of the boxed Value in |value| to an int32 in |output|.
// |maybeInput| (may be null) supplies MIR type information so branches for
// impossible tags can be elided via branchEqualTypeIfNeeded. String inputs
// are supported only in truncating/clamping modes and only when both
// |handleStringEntry| and |handleStringRejoin| are provided: the string is
// unboxed into |stringReg| and control jumps to the out-of-line entry, which
// re-enters at |handleStringRejoin| with a double in |temp|.
void
MacroAssembler::convertValueToInt(ValueOperand value, MDefinition *maybeInput,
                                  Label *handleStringEntry, Label *handleStringRejoin,
                                  Label *truncateDoubleSlow,
                                  Register stringReg, FloatRegister temp, Register output,
                                  Label *fail, IntConversionBehavior behavior,
                                  IntConversionInputKind conversion)
    Register tag = splitTagForTest(value);
    bool handleStrings = (behavior == IntConversion_Truncate ||
                          behavior == IntConversion_ClampToUint8) &&
                         handleStringEntry &&
                         handleStringRejoin;

    JS_ASSERT_IF(handleStrings, conversion == IntConversion_Any);

    Label done, isInt32, isBool, isDouble, isNull, isString;

    branchEqualTypeIfNeeded(MIRType_Int32, maybeInput, tag, &isInt32);
    if (conversion == IntConversion_Any || conversion == IntConversion_NumbersOrBoolsOnly)
        branchEqualTypeIfNeeded(MIRType_Boolean, maybeInput, tag, &isBool);
    branchEqualTypeIfNeeded(MIRType_Double, maybeInput, tag, &isDouble);

    if (conversion == IntConversion_Any) {
        // If we are not truncating, we fail for anything that's not
        // null. Otherwise we might be able to handle strings and objects.
        switch (behavior) {
          case IntConversion_Normal:
          case IntConversion_NegativeZeroCheck:
            branchTestNull(Assembler::NotEqual, tag, fail);
            break;

          case IntConversion_Truncate:
          case IntConversion_ClampToUint8:
            branchEqualTypeIfNeeded(MIRType_Null, maybeInput, tag, &isNull);
            if (handleStrings)
                branchEqualTypeIfNeeded(MIRType_String, maybeInput, tag, &isString);
            branchEqualTypeIfNeeded(MIRType_Object, maybeInput, tag, fail);
            branchTestUndefined(Assembler::NotEqual, tag, fail);
            break;
    } else {
        jump(fail);

    // The value is null or undefined in truncation contexts - just emit 0.
    if (isNull.used())
        bind(&isNull);
    mov(ImmWord(0), output);
    jump(&done);

    // Try converting a string into a double, then jump to the double case.
    if (handleStrings) {
        bind(&isString);
        unboxString(value, stringReg);
        jump(handleStringEntry);

    // Try converting double into integer.
    if (isDouble.used() || handleStrings) {
        if (isDouble.used()) {
            bind(&isDouble);
            unboxDouble(value, temp);

        // The out-of-line string path rejoins here with the double in |temp|.
        if (handleStrings)
            bind(handleStringRejoin);

        convertDoubleToInt(temp, output, temp, truncateDoubleSlow, fail, behavior);
        jump(&done);

    // Just unbox a bool, the result is 0 or 1.
    if (isBool.used()) {
        bind(&isBool);
        unboxBoolean(value, output);
        jump(&done);

    // Integers can be unboxed.
    if (isInt32.used()) {
        bind(&isInt32);
        unboxInt32(value, output);
        if (behavior == IntConversion_ClampToUint8)
            clampIntToUint8(output);

    bind(&done);
// Compile-time counterpart of the ValueOperand overload: |v| is a constant
// Value, so the int32 result is computed here and emitted as an immediate
// move. Returns false only on OOM from string-to-number conversion; objects
// emit a jump to |fail|.
bool
MacroAssembler::convertValueToInt(JSContext *cx, const Value &v, Register output, Label *fail,
                                  IntConversionBehavior behavior)
    bool handleStrings = (behavior == IntConversion_Truncate ||
                          behavior == IntConversion_ClampToUint8);

    if (v.isNumber() || (handleStrings && v.isString())) {
        double d;
        if (v.isNumber())
            d = v.toNumber();
        else if (!StringToNumber(cx, v.toString(), &d))
            return false;

        switch (behavior) {
          case IntConversion_Normal:
          case IntConversion_NegativeZeroCheck: {
            // -0 is checked anyways if we have a constant value.
            int i;
            if (mozilla::NumberIsInt32(d, &i))
                move32(Imm32(i), output);
            else
                jump(fail);
            break;

          case IntConversion_Truncate:
            move32(Imm32(js::ToInt32(d)), output);
            break;
          case IntConversion_ClampToUint8:
            move32(Imm32(ClampDoubleToUint8(d)), output);
            break;

        return true;

    if (v.isBoolean()) {
        move32(Imm32(v.toBoolean() ? 1 : 0), output);
        return true;

    if (v.isNull() || v.isUndefined()) {
        move32(Imm32(0), output);
        return true;

    JS_ASSERT(v.isObject());

    jump(fail);
    return true;
// Dispatch helper: convert |src| to an int32 in |output|, using the constant
// path when |src| is a known Value and the register path otherwise. Returns
// false only on OOM in the constant path.
bool
MacroAssembler::convertConstantOrRegisterToInt(JSContext *cx, ConstantOrRegister src,
                                               FloatRegister temp, Register output,
                                               Label *fail, IntConversionBehavior behavior)
    if (src.constant())
        return convertValueToInt(cx, src.value(), output, fail, behavior);

    convertTypedOrValueToInt(src.reg(), temp, output, fail, behavior);
    return true;
  1615 void
  1616 MacroAssembler::convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister temp,
  1617                                          Register output, Label *fail,
  1618                                          IntConversionBehavior behavior)
  1620     if (src.hasValue()) {
  1621         convertValueToInt(src.valueReg(), temp, output, fail, behavior);
  1622         return;
  1625     switch (src.type()) {
  1626       case MIRType_Undefined:
  1627       case MIRType_Null:
  1628         move32(Imm32(0), output);
  1629         break;
  1630       case MIRType_Boolean:
  1631       case MIRType_Int32:
  1632         if (src.typedReg().gpr() != output)
  1633             move32(src.typedReg().gpr(), output);
  1634         if (src.type() == MIRType_Int32 && behavior == IntConversion_ClampToUint8)
  1635             clampIntToUint8(output);
  1636         break;
  1637       case MIRType_Double:
  1638         convertDoubleToInt(src.typedReg().fpu(), output, temp, nullptr, fail, behavior);
  1639         break;
  1640       case MIRType_Float32:
  1641         // Conversion to Double simplifies implementation at the expense of performance.
  1642         convertFloat32ToDouble(src.typedReg().fpu(), temp);
  1643         convertDoubleToInt(temp, output, temp, nullptr, fail, behavior);
  1644         break;
  1645       case MIRType_String:
  1646       case MIRType_Object:
  1647         jump(fail);
  1648         break;
  1649       default:
  1650         MOZ_ASSUME_UNREACHABLE("Bad MIRType");
// Emit the shared failure tails and finalize the assembler.
//
// Code emitted earlier branches to sequentialFailureLabel_ /
// parallelFailureLabel_ on bailout; each tail is only emitted here if some
// branch actually targets it.  Finally delegates to the platform-specific
// MacroAssemblerSpecific::finish().
void
MacroAssembler::finish()
{
    // Shared tail for sequential-execution failures.
    if (sequentialFailureLabel_.used()) {
        bind(&sequentialFailureLabel_);
        handleFailure(SequentialExecution);
    }

    // Shared tail for parallel (ForkJoin) execution failures.
    if (parallelFailureLabel_.used()) {
        bind(&parallelFailureLabel_);
        handleFailure(ParallelExecution);
    }

    MacroAssemblerSpecific::finish();
}
// Branch to |label| if the JSFunction in |fun| is not an interpreted
// constructor.  Clobbers |scratch| with the combined nargs/flags word.
void
MacroAssembler::branchIfNotInterpretedConstructor(Register fun, Register scratch, Label *label)
{
    // 16-bit loads are slow and unaligned 32-bit loads may be too so
    // perform an aligned 32-bit load and adjust the bitmask accordingly.
    JS_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
    JS_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
    JS_STATIC_ASSERT(IS_LITTLE_ENDIAN);

    // Emit code for the following test:
    //
    // bool isInterpretedConstructor() const {
    //     return isInterpreted() && !isFunctionPrototype() && !isArrow() &&
    //         (!isSelfHostedBuiltin() || isSelfHostedConstructor());
    // }

    // First, ensure it's a scripted function.
    // Flags live in the upper 16 bits of the loaded word (little-endian,
    // flags at nargs + 2), hence every mask below is shifted by 16.
    load32(Address(fun, JSFunction::offsetOfNargs()), scratch);
    branchTest32(Assembler::Zero, scratch, Imm32(JSFunction::INTERPRETED << 16), label);

    // Common case: if IS_FUN_PROTO, ARROW and SELF_HOSTED are not set,
    // the function is an interpreted constructor and we're done.
    Label done;
    uint32_t bits = (JSFunction::IS_FUN_PROTO | JSFunction::ARROW | JSFunction::SELF_HOSTED) << 16;
    branchTest32(Assembler::Zero, scratch, Imm32(bits), &done);
    {
        // The callee is either Function.prototype, an arrow function or
        // self-hosted. None of these are constructible, except self-hosted
        // constructors, so branch to |label| if SELF_HOSTED_CTOR is not set.
        branchTest32(Assembler::Zero, scratch, Imm32(JSFunction::SELF_HOSTED_CTOR << 16), label);

#ifdef DEBUG
        // Function.prototype should not have the SELF_HOSTED_CTOR flag.
        branchTest32(Assembler::Zero, scratch, Imm32(JSFunction::IS_FUN_PROTO << 16), &done);
        breakpoint();
#endif
    }
    bind(&done);
}
  1709 void
  1710 MacroAssembler::branchEqualTypeIfNeeded(MIRType type, MDefinition *maybeDef, Register tag,
  1711                                         Label *label)
  1713     if (!maybeDef || maybeDef->mightBeType(type)) {
  1714         switch (type) {
  1715           case MIRType_Null:
  1716             branchTestNull(Equal, tag, label);
  1717             break;
  1718           case MIRType_Boolean:
  1719             branchTestBoolean(Equal, tag, label);
  1720             break;
  1721           case MIRType_Int32:
  1722             branchTestInt32(Equal, tag, label);
  1723             break;
  1724           case MIRType_Double:
  1725             branchTestDouble(Equal, tag, label);
  1726             break;
  1727           case MIRType_String:
  1728             branchTestString(Equal, tag, label);
  1729             break;
  1730           case MIRType_Object:
  1731             branchTestObject(Equal, tag, label);
  1732             break;
  1733           default:
  1734             MOZ_ASSUME_UNREACHABLE("Unsupported type");
// If a pseudostack frame has this as its label, its stack pointer
// field points to the registers saved on entry to JIT code.  A native
// stack unwinder could use that information to continue unwinding
// past that point.
const char MacroAssembler::enterJitLabel[] = "EnterJIT";
  1746 // Creates an enterJIT pseudostack frame, as described above.  Pushes
  1747 // a word to the stack to indicate whether this was done.  |framePtr| is
  1748 // the pointer to the machine-dependent saved state.
  1749 void
  1750 MacroAssembler::spsMarkJit(SPSProfiler *p, Register framePtr, Register temp)
  1752     Label spsNotEnabled;
  1753     uint32_t *enabledAddr = p->addressOfEnabled();
  1754     load32(AbsoluteAddress(enabledAddr), temp);
  1755     push(temp); // +4: Did we push an sps frame.
  1756     branchTest32(Assembler::Equal, temp, temp, &spsNotEnabled);
  1758     Label stackFull;
  1759     // We always need the "safe" versions, because these are used in trampolines
  1760     // and won't be regenerated when SPS state changes.
  1761     spsProfileEntryAddressSafe(p, 0, temp, &stackFull);
  1763     storePtr(ImmPtr(enterJitLabel), Address(temp, ProfileEntry::offsetOfString()));
  1764     storePtr(framePtr,              Address(temp, ProfileEntry::offsetOfStackAddress()));
  1765     storePtr(ImmWord(uintptr_t(0)), Address(temp, ProfileEntry::offsetOfScript()));
  1766     store32(Imm32(ProfileEntry::NullPCIndex), Address(temp, ProfileEntry::offsetOfPCIdx()));
  1768     /* Always increment the stack size, whether or not we actually pushed. */
  1769     bind(&stackFull);
  1770     loadPtr(AbsoluteAddress(p->addressOfSizePointer()), temp);
  1771     add32(Imm32(1), Address(temp, 0));
  1773     bind(&spsNotEnabled);
  1776 // Pops the word pushed by spsMarkJit and, if spsMarkJit pushed an SPS
  1777 // frame, pops it.
  1778 void
  1779 MacroAssembler::spsUnmarkJit(SPSProfiler *p, Register temp)
  1781     Label spsNotEnabled;
  1782     pop(temp); // -4: Was the profiler enabled.
  1783     branchTest32(Assembler::Equal, temp, temp, &spsNotEnabled);
  1785     spsPopFrameSafe(p, temp);
  1787     bind(&spsNotEnabled);

mercurial