js/src/jit/IonMacroAssembler.cpp

changeset 0:6474c204b198
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/js/src/jit/IonMacroAssembler.cpp	Wed Dec 31 06:09:35 2014 +0100
     1.3 @@ -0,0 +1,1788 @@
     1.4 +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
     1.5 + * vim: set ts=8 sts=4 et sw=4 tw=99:
     1.6 + * This Source Code Form is subject to the terms of the Mozilla Public
     1.7 + * License, v. 2.0. If a copy of the MPL was not distributed with this
     1.8 + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
     1.9 +
    1.10 +#include "jit/IonMacroAssembler.h"
    1.11 +
    1.12 +#include "jsinfer.h"
    1.13 +#include "jsprf.h"
    1.14 +
    1.15 +#include "builtin/TypedObject.h"
    1.16 +#include "jit/Bailouts.h"
    1.17 +#include "jit/BaselineFrame.h"
    1.18 +#include "jit/BaselineIC.h"
    1.19 +#include "jit/BaselineJIT.h"
    1.20 +#include "jit/Lowering.h"
    1.21 +#include "jit/MIR.h"
    1.22 +#include "jit/ParallelFunctions.h"
    1.23 +#include "vm/ForkJoin.h"
    1.24 +#include "vm/TraceLogging.h"
    1.25 +
    1.26 +#ifdef JSGC_GENERATIONAL
    1.27 +# include "jsgcinlines.h"
    1.28 +#endif
    1.29 +#include "jsinferinlines.h"
    1.30 +#include "jsobjinlines.h"
    1.31 +
    1.32 +using namespace js;
    1.33 +using namespace js::jit;
    1.34 +
    1.35 +using JS::GenericNaN;
    1.36 +
    1.37 +namespace {
    1.38 +
     1.39 +// Emulate TypeSet logic for a single Type object, so that the guard logic
     1.40 +// below does not need to be duplicated.
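          +// For example, guardType() below wraps a single types::Type in a TypeWrapper
          +// so that guardTypeSet() can serve both full type sets and single-type
          +// guards; roughly:
          +//
          +//     TypeWrapper wrapper(types::Type::Int32Type());
          +//     wrapper.hasType(types::Type::Int32Type());   // true
          +//     wrapper.hasType(types::Type::StringType());  // false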
    1.41 +class TypeWrapper {
    1.42 +    types::Type t_;
    1.43 +
    1.44 +  public:
    1.45 +    TypeWrapper(types::Type t) : t_(t) {}
    1.46 +
    1.47 +    inline bool unknown() const {
    1.48 +        return t_.isUnknown();
    1.49 +    }
    1.50 +    inline bool hasType(types::Type t) const {
    1.51 +        if (t == types::Type::Int32Type())
    1.52 +            return t == t_ || t_ == types::Type::DoubleType();
    1.53 +        return t == t_;
    1.54 +    }
    1.55 +    inline unsigned getObjectCount() const {
    1.56 +        if (t_.isAnyObject() || t_.isUnknown() || !t_.isObject())
    1.57 +            return 0;
    1.58 +        return 1;
    1.59 +    }
    1.60 +    inline JSObject *getSingleObject(unsigned) const {
    1.61 +        if (t_.isSingleObject())
    1.62 +            return t_.singleObject();
    1.63 +        return nullptr;
    1.64 +    }
    1.65 +    inline types::TypeObject *getTypeObject(unsigned) const {
    1.66 +        if (t_.isTypeObject())
    1.67 +            return t_.typeObject();
    1.68 +        return nullptr;
    1.69 +    }
    1.70 +};
    1.71 +
    1.72 +} /* anonymous namespace */
    1.73 +
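          +// Guard that the value at |address| has one of the types in |types|, jumping
          +// to |miss| otherwise. For a set such as {Int32, AnyObject} the emitted
          +// sequence is roughly:
          +//
          +//     branchTestInt32(Equal, tag, &matched);
          +//     branchTestObject(NotEqual, tag, miss);   // last branch, inverted
          +//     bind(&matched);
          +//
          +// When the set lists specific objects rather than AnyObject, the object case
          +// instead falls through into guardObjectType() below.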
    1.74 +template <typename Source, typename TypeSet> void
    1.75 +MacroAssembler::guardTypeSet(const Source &address, const TypeSet *types,
    1.76 +                             Register scratch, Label *miss)
    1.77 +{
    1.78 +    JS_ASSERT(!types->unknown());
    1.79 +
    1.80 +    Label matched;
    1.81 +    types::Type tests[7] = {
    1.82 +        types::Type::Int32Type(),
    1.83 +        types::Type::UndefinedType(),
    1.84 +        types::Type::BooleanType(),
    1.85 +        types::Type::StringType(),
    1.86 +        types::Type::NullType(),
    1.87 +        types::Type::MagicArgType(),
    1.88 +        types::Type::AnyObjectType()
    1.89 +    };
    1.90 +
     1.91 +    // The double type also implies Int32, so replace the Int32 test
     1.92 +    // with the Double one.
    1.93 +    if (types->hasType(types::Type::DoubleType())) {
    1.94 +        JS_ASSERT(types->hasType(types::Type::Int32Type()));
    1.95 +        tests[0] = types::Type::DoubleType();
    1.96 +    }
    1.97 +
    1.98 +    Register tag = extractTag(address, scratch);
    1.99 +
   1.100 +    // Emit all typed tests.
   1.101 +    BranchType lastBranch;
   1.102 +    for (size_t i = 0; i < 7; i++) {
   1.103 +        if (!types->hasType(tests[i]))
   1.104 +            continue;
   1.105 +
   1.106 +        if (lastBranch.isInitialized())
   1.107 +            lastBranch.emit(*this);
   1.108 +        lastBranch = BranchType(Equal, tag, tests[i], &matched);
   1.109 +    }
   1.110 +
    1.111 +    // If no specific-object tests will follow, this is the last check:
          +    // invert the last branch so that it jumps to |miss| on mismatch.
   1.112 +    if (types->hasType(types::Type::AnyObjectType()) || !types->getObjectCount()) {
   1.113 +        if (!lastBranch.isInitialized()) {
   1.114 +            jump(miss);
   1.115 +            return;
   1.116 +        }
   1.117 +
   1.118 +        lastBranch.invertCondition();
   1.119 +        lastBranch.relink(miss);
   1.120 +        lastBranch.emit(*this);
   1.121 +
   1.122 +        bind(&matched);
   1.123 +        return;
   1.124 +    }
   1.125 +
   1.126 +    if (lastBranch.isInitialized())
   1.127 +        lastBranch.emit(*this);
   1.128 +
   1.129 +    // Test specific objects.
   1.130 +    JS_ASSERT(scratch != InvalidReg);
   1.131 +    branchTestObject(NotEqual, tag, miss);
   1.132 +    Register obj = extractObject(address, scratch);
   1.133 +    guardObjectType(obj, types, scratch, miss);
   1.134 +
   1.135 +    bind(&matched);
   1.136 +}
   1.137 +
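          +// Guard that |obj| is one of the singleton objects or type objects listed in
          +// |types|, jumping to |miss| otherwise. Singletons are compared by pointer;
          +// type objects are compared against obj->type(), loaded into |scratch|. For
          +// one singleton S and one type object T this emits roughly:
          +//
          +//     branchPtr(Equal, obj, ImmGCPtr(S), &matched);
          +//     loadPtr(Address(obj, JSObject::offsetOfType()), scratch);
          +//     branchPtr(NotEqual, scratch, ImmGCPtr(T), miss);   // inverted
          +//     bind(&matched);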
   1.138 +template <typename TypeSet> void
   1.139 +MacroAssembler::guardObjectType(Register obj, const TypeSet *types,
   1.140 +                                Register scratch, Label *miss)
   1.141 +{
   1.142 +    JS_ASSERT(!types->unknown());
   1.143 +    JS_ASSERT(!types->hasType(types::Type::AnyObjectType()));
   1.144 +    JS_ASSERT(types->getObjectCount());
   1.145 +    JS_ASSERT(scratch != InvalidReg);
   1.146 +
   1.147 +    Label matched;
   1.148 +
   1.149 +    BranchGCPtr lastBranch;
   1.150 +    JS_ASSERT(!lastBranch.isInitialized());
   1.151 +    bool hasTypeObjects = false;
   1.152 +    unsigned count = types->getObjectCount();
   1.153 +    for (unsigned i = 0; i < count; i++) {
   1.154 +        if (!types->getSingleObject(i)) {
   1.155 +            hasTypeObjects = hasTypeObjects || types->getTypeObject(i);
   1.156 +            continue;
   1.157 +        }
   1.158 +
   1.159 +        if (lastBranch.isInitialized())
   1.160 +            lastBranch.emit(*this);
   1.161 +
   1.162 +        JSObject *object = types->getSingleObject(i);
   1.163 +        lastBranch = BranchGCPtr(Equal, obj, ImmGCPtr(object), &matched);
   1.164 +    }
   1.165 +
   1.166 +    if (hasTypeObjects) {
    1.167 +        // We may overwrite the obj register below, so emit any pending branch
    1.168 +        // now, while it still refers to the current value of obj. A type-object
    1.169 +        // branch is guaranteed to follow, so there is no need to invert the
    1.170 +        // condition here.
   1.171 +        if (lastBranch.isInitialized())
   1.172 +            lastBranch.emit(*this);
   1.173 +        lastBranch = BranchGCPtr();
   1.174 +
    1.175 +        // Note: some platforms may assign the same register to obj and scratch.
    1.176 +        // Once scratch has been written, the obj register must not be used again!
   1.177 +        loadPtr(Address(obj, JSObject::offsetOfType()), scratch);
   1.178 +
   1.179 +        for (unsigned i = 0; i < count; i++) {
   1.180 +            if (!types->getTypeObject(i))
   1.181 +                continue;
   1.182 +
   1.183 +            if (lastBranch.isInitialized())
   1.184 +                lastBranch.emit(*this);
   1.185 +
   1.186 +            types::TypeObject *object = types->getTypeObject(i);
   1.187 +            lastBranch = BranchGCPtr(Equal, scratch, ImmGCPtr(object), &matched);
   1.188 +        }
   1.189 +    }
   1.190 +
   1.191 +    if (!lastBranch.isInitialized()) {
   1.192 +        jump(miss);
   1.193 +        return;
   1.194 +    }
   1.195 +
   1.196 +    lastBranch.invertCondition();
   1.197 +    lastBranch.relink(miss);
   1.198 +    lastBranch.emit(*this);
   1.199 +
   1.200 +    bind(&matched);
   1.201 +    return;
   1.202 +}
   1.203 +
   1.204 +template <typename Source> void
   1.205 +MacroAssembler::guardType(const Source &address, types::Type type,
   1.206 +                          Register scratch, Label *miss)
   1.207 +{
   1.208 +    TypeWrapper wrapper(type);
   1.209 +    guardTypeSet(address, &wrapper, scratch, miss);
   1.210 +}
   1.211 +
   1.212 +template void MacroAssembler::guardTypeSet(const Address &address, const types::TemporaryTypeSet *types,
   1.213 +                                           Register scratch, Label *miss);
   1.214 +template void MacroAssembler::guardTypeSet(const ValueOperand &value, const types::TemporaryTypeSet *types,
   1.215 +                                           Register scratch, Label *miss);
   1.216 +
   1.217 +template void MacroAssembler::guardTypeSet(const Address &address, const types::HeapTypeSet *types,
   1.218 +                                           Register scratch, Label *miss);
   1.219 +template void MacroAssembler::guardTypeSet(const ValueOperand &value, const types::HeapTypeSet *types,
   1.220 +                                           Register scratch, Label *miss);
   1.221 +template void MacroAssembler::guardTypeSet(const TypedOrValueRegister &reg, const types::HeapTypeSet *types,
   1.222 +                                           Register scratch, Label *miss);
   1.223 +
   1.224 +template void MacroAssembler::guardTypeSet(const Address &address, const types::TypeSet *types,
   1.225 +                                           Register scratch, Label *miss);
   1.226 +template void MacroAssembler::guardTypeSet(const ValueOperand &value, const types::TypeSet *types,
   1.227 +                                           Register scratch, Label *miss);
   1.228 +
   1.229 +template void MacroAssembler::guardTypeSet(const Address &address, const TypeWrapper *types,
   1.230 +                                           Register scratch, Label *miss);
   1.231 +template void MacroAssembler::guardTypeSet(const ValueOperand &value, const TypeWrapper *types,
   1.232 +                                           Register scratch, Label *miss);
   1.233 +
   1.234 +template void MacroAssembler::guardObjectType(Register obj, const types::TemporaryTypeSet *types,
   1.235 +                                              Register scratch, Label *miss);
   1.236 +template void MacroAssembler::guardObjectType(Register obj, const types::TypeSet *types,
   1.237 +                                              Register scratch, Label *miss);
   1.238 +template void MacroAssembler::guardObjectType(Register obj, const TypeWrapper *types,
   1.239 +                                              Register scratch, Label *miss);
   1.240 +
   1.241 +template void MacroAssembler::guardType(const Address &address, types::Type type,
   1.242 +                                        Register scratch, Label *miss);
   1.243 +template void MacroAssembler::guardType(const ValueOperand &value, types::Type type,
   1.244 +                                        Register scratch, Label *miss);
   1.245 +
   1.246 +void
   1.247 +MacroAssembler::branchNurseryPtr(Condition cond, const Address &ptr1, const ImmMaybeNurseryPtr &ptr2,
   1.248 +                                 Label *label)
   1.249 +{
   1.250 +#ifdef JSGC_GENERATIONAL
   1.251 +    if (ptr2.value && gc::IsInsideNursery(GetIonContext()->cx->runtime(), (void *)ptr2.value))
   1.252 +        embedsNurseryPointers_ = true;
   1.253 +#endif
   1.254 +    branchPtr(cond, ptr1, ptr2, label);
   1.255 +}
   1.256 +
   1.257 +void
   1.258 +MacroAssembler::moveNurseryPtr(const ImmMaybeNurseryPtr &ptr, Register reg)
   1.259 +{
   1.260 +#ifdef JSGC_GENERATIONAL
   1.261 +    if (ptr.value && gc::IsInsideNursery(GetIonContext()->cx->runtime(), (void *)ptr.value))
   1.262 +        embedsNurseryPointers_ = true;
   1.263 +#endif
   1.264 +    movePtr(ptr, reg);
   1.265 +}
   1.266 +
   1.267 +template<typename S, typename T>
   1.268 +static void
   1.269 +StoreToTypedFloatArray(MacroAssembler &masm, int arrayType, const S &value, const T &dest)
   1.270 +{
   1.271 +    switch (arrayType) {
   1.272 +      case ScalarTypeDescr::TYPE_FLOAT32:
   1.273 +        if (LIRGenerator::allowFloat32Optimizations()) {
   1.274 +            masm.storeFloat32(value, dest);
   1.275 +        } else {
   1.276 +#ifdef JS_MORE_DETERMINISTIC
   1.277 +            // See the comment in TypedArrayObjectTemplate::doubleToNative.
   1.278 +            masm.canonicalizeDouble(value);
   1.279 +#endif
   1.280 +            masm.convertDoubleToFloat32(value, ScratchFloatReg);
   1.281 +            masm.storeFloat32(ScratchFloatReg, dest);
   1.282 +        }
   1.283 +        break;
   1.284 +      case ScalarTypeDescr::TYPE_FLOAT64:
   1.285 +#ifdef JS_MORE_DETERMINISTIC
   1.286 +        // See the comment in TypedArrayObjectTemplate::doubleToNative.
   1.287 +        masm.canonicalizeDouble(value);
   1.288 +#endif
   1.289 +        masm.storeDouble(value, dest);
   1.290 +        break;
   1.291 +      default:
   1.292 +        MOZ_ASSUME_UNREACHABLE("Invalid typed array type");
   1.293 +    }
   1.294 +}
   1.295 +
   1.296 +void
   1.297 +MacroAssembler::storeToTypedFloatArray(int arrayType, const FloatRegister &value,
   1.298 +                                       const BaseIndex &dest)
   1.299 +{
   1.300 +    StoreToTypedFloatArray(*this, arrayType, value, dest);
   1.301 +}
   1.302 +void
   1.303 +MacroAssembler::storeToTypedFloatArray(int arrayType, const FloatRegister &value,
   1.304 +                                       const Address &dest)
   1.305 +{
   1.306 +    StoreToTypedFloatArray(*this, arrayType, value, dest);
   1.307 +}
   1.308 +
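          +// Load one element from the typed array slot |src| into |dest|, converting it
          +// to the natural representation for the element type. For Uint32 loads into a
          +// general-purpose register, |fail| is taken when the value does not fit in a
          +// signed int32.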
   1.309 +template<typename T>
   1.310 +void
   1.311 +MacroAssembler::loadFromTypedArray(int arrayType, const T &src, AnyRegister dest, Register temp,
   1.312 +                                   Label *fail)
   1.313 +{
   1.314 +    switch (arrayType) {
   1.315 +      case ScalarTypeDescr::TYPE_INT8:
   1.316 +        load8SignExtend(src, dest.gpr());
   1.317 +        break;
   1.318 +      case ScalarTypeDescr::TYPE_UINT8:
   1.319 +      case ScalarTypeDescr::TYPE_UINT8_CLAMPED:
   1.320 +        load8ZeroExtend(src, dest.gpr());
   1.321 +        break;
   1.322 +      case ScalarTypeDescr::TYPE_INT16:
   1.323 +        load16SignExtend(src, dest.gpr());
   1.324 +        break;
   1.325 +      case ScalarTypeDescr::TYPE_UINT16:
   1.326 +        load16ZeroExtend(src, dest.gpr());
   1.327 +        break;
   1.328 +      case ScalarTypeDescr::TYPE_INT32:
   1.329 +        load32(src, dest.gpr());
   1.330 +        break;
   1.331 +      case ScalarTypeDescr::TYPE_UINT32:
   1.332 +        if (dest.isFloat()) {
   1.333 +            load32(src, temp);
   1.334 +            convertUInt32ToDouble(temp, dest.fpu());
   1.335 +        } else {
   1.336 +            load32(src, dest.gpr());
   1.337 +
   1.338 +            // Bail out if the value doesn't fit into a signed int32 value. This
   1.339 +            // is what allows MLoadTypedArrayElement to have a type() of
   1.340 +            // MIRType_Int32 for UInt32 array loads.
   1.341 +            branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
   1.342 +        }
   1.343 +        break;
   1.344 +      case ScalarTypeDescr::TYPE_FLOAT32:
   1.345 +        if (LIRGenerator::allowFloat32Optimizations()) {
   1.346 +            loadFloat32(src, dest.fpu());
   1.347 +            canonicalizeFloat(dest.fpu());
   1.348 +        } else {
   1.349 +            loadFloatAsDouble(src, dest.fpu());
   1.350 +            canonicalizeDouble(dest.fpu());
   1.351 +        }
   1.352 +        break;
   1.353 +      case ScalarTypeDescr::TYPE_FLOAT64:
   1.354 +        loadDouble(src, dest.fpu());
   1.355 +        canonicalizeDouble(dest.fpu());
   1.356 +        break;
   1.357 +      default:
   1.358 +        MOZ_ASSUME_UNREACHABLE("Invalid typed array type");
   1.359 +    }
   1.360 +}
   1.361 +
   1.362 +template void MacroAssembler::loadFromTypedArray(int arrayType, const Address &src, AnyRegister dest,
   1.363 +                                                 Register temp, Label *fail);
   1.364 +template void MacroAssembler::loadFromTypedArray(int arrayType, const BaseIndex &src, AnyRegister dest,
   1.365 +                                                 Register temp, Label *fail);
   1.366 +
   1.367 +template<typename T>
   1.368 +void
   1.369 +MacroAssembler::loadFromTypedArray(int arrayType, const T &src, const ValueOperand &dest,
   1.370 +                                   bool allowDouble, Register temp, Label *fail)
   1.371 +{
   1.372 +    switch (arrayType) {
   1.373 +      case ScalarTypeDescr::TYPE_INT8:
   1.374 +      case ScalarTypeDescr::TYPE_UINT8:
   1.375 +      case ScalarTypeDescr::TYPE_UINT8_CLAMPED:
   1.376 +      case ScalarTypeDescr::TYPE_INT16:
   1.377 +      case ScalarTypeDescr::TYPE_UINT16:
   1.378 +      case ScalarTypeDescr::TYPE_INT32:
   1.379 +        loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()), InvalidReg, nullptr);
   1.380 +        tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
   1.381 +        break;
   1.382 +      case ScalarTypeDescr::TYPE_UINT32:
    1.383 +        // Don't clobber dest while we could still fail; use temp instead.
   1.384 +        load32(src, temp);
   1.385 +        if (allowDouble) {
    1.386 +            // If the value fits in an int32, store an int32 type tag.
    1.387 +            // Otherwise, convert the value to a double and box it.
   1.388 +            Label done, isDouble;
   1.389 +            branchTest32(Assembler::Signed, temp, temp, &isDouble);
   1.390 +            {
   1.391 +                tagValue(JSVAL_TYPE_INT32, temp, dest);
   1.392 +                jump(&done);
   1.393 +            }
   1.394 +            bind(&isDouble);
   1.395 +            {
   1.396 +                convertUInt32ToDouble(temp, ScratchFloatReg);
   1.397 +                boxDouble(ScratchFloatReg, dest);
   1.398 +            }
   1.399 +            bind(&done);
   1.400 +        } else {
    1.401 +            // Bail out if the value does not fit in an int32.
   1.402 +            branchTest32(Assembler::Signed, temp, temp, fail);
   1.403 +            tagValue(JSVAL_TYPE_INT32, temp, dest);
   1.404 +        }
   1.405 +        break;
   1.406 +      case ScalarTypeDescr::TYPE_FLOAT32:
   1.407 +        loadFromTypedArray(arrayType, src, AnyRegister(ScratchFloatReg), dest.scratchReg(),
   1.408 +                           nullptr);
   1.409 +        if (LIRGenerator::allowFloat32Optimizations())
   1.410 +            convertFloat32ToDouble(ScratchFloatReg, ScratchFloatReg);
   1.411 +        boxDouble(ScratchFloatReg, dest);
   1.412 +        break;
   1.413 +      case ScalarTypeDescr::TYPE_FLOAT64:
   1.414 +        loadFromTypedArray(arrayType, src, AnyRegister(ScratchFloatReg), dest.scratchReg(),
   1.415 +                           nullptr);
   1.416 +        boxDouble(ScratchFloatReg, dest);
   1.417 +        break;
   1.418 +      default:
   1.419 +        MOZ_ASSUME_UNREACHABLE("Invalid typed array type");
   1.420 +    }
   1.421 +}
   1.422 +
   1.423 +template void MacroAssembler::loadFromTypedArray(int arrayType, const Address &src, const ValueOperand &dest,
   1.424 +                                                 bool allowDouble, Register temp, Label *fail);
   1.425 +template void MacroAssembler::loadFromTypedArray(int arrayType, const BaseIndex &src, const ValueOperand &dest,
   1.426 +                                                 bool allowDouble, Register temp, Label *fail);
   1.427 +
   1.428 +void
   1.429 +MacroAssembler::newGCThing(Register result, Register temp, gc::AllocKind allocKind, Label *fail,
   1.430 +                           gc::InitialHeap initialHeap /* = gc::DefaultHeap */)
   1.431 +{
   1.432 +    // Inlined equivalent of js::gc::NewGCThing() without failure case handling.
   1.433 +
   1.434 +    int thingSize = int(gc::Arena::thingSize(allocKind));
   1.435 +
   1.436 +#ifdef JS_GC_ZEAL
   1.437 +    // Don't execute the inline path if gcZeal is active.
   1.438 +    branch32(Assembler::NotEqual,
   1.439 +             AbsoluteAddress(GetIonContext()->runtime->addressOfGCZeal()), Imm32(0),
   1.440 +             fail);
   1.441 +#endif
   1.442 +
   1.443 +    // Don't execute the inline path if the compartment has an object metadata callback,
   1.444 +    // as the metadata to use for the object may vary between executions of the op.
   1.445 +    if (GetIonContext()->compartment->hasObjectMetadataCallback())
   1.446 +        jump(fail);
   1.447 +
   1.448 +#ifdef JSGC_GENERATIONAL
   1.449 +    // Always use nursery allocation if it is possible to do so. The jit
   1.450 +    // assumes a nursery pointer is returned to avoid barriers.
   1.451 +    if (allocKind <= gc::FINALIZE_OBJECT_LAST && initialHeap != gc::TenuredHeap) {
   1.452 +        // Inline Nursery::allocate. No explicit check for nursery.isEnabled()
   1.453 +        // is needed, as the comparison with the nursery's end will always fail
   1.454 +        // in such cases.
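          +        //
          +        // In pseudo-code, the inlined fast path is roughly:
          +        //
          +        //     result = nursery.position;
          +        //     temp = result + thingSize;
          +        //     if (nursery.currentEnd <= temp)
          +        //         goto fail;
          +        //     nursery.position = temp;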
   1.455 +        const Nursery &nursery = GetIonContext()->runtime->gcNursery();
   1.456 +        loadPtr(AbsoluteAddress(nursery.addressOfPosition()), result);
   1.457 +        computeEffectiveAddress(Address(result, thingSize), temp);
   1.458 +        branchPtr(Assembler::BelowOrEqual, AbsoluteAddress(nursery.addressOfCurrentEnd()), temp, fail);
   1.459 +        storePtr(temp, AbsoluteAddress(nursery.addressOfPosition()));
   1.460 +        return;
   1.461 +    }
   1.462 +#endif // JSGC_GENERATIONAL
   1.463 +
   1.464 +    CompileZone *zone = GetIonContext()->compartment->zone();
   1.465 +
   1.466 +    // Inline FreeSpan::allocate.
   1.467 +    // There is always exactly one FreeSpan per allocKind per JSCompartment.
   1.468 +    // If a FreeSpan is replaced, its members are updated in the freeLists table,
   1.469 +    // which the code below always re-reads.
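          +    //
          +    // In pseudo-code, the fast path is roughly:
          +    //
          +    //     result = freeList(allocKind).first;
          +    //     if (freeList(allocKind).last <= result)
          +    //         goto fail;
          +    //     freeList(allocKind).first = result + thingSize;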
   1.470 +    loadPtr(AbsoluteAddress(zone->addressOfFreeListFirst(allocKind)), result);
   1.471 +    branchPtr(Assembler::BelowOrEqual, AbsoluteAddress(zone->addressOfFreeListLast(allocKind)), result, fail);
   1.472 +    computeEffectiveAddress(Address(result, thingSize), temp);
   1.473 +    storePtr(temp, AbsoluteAddress(zone->addressOfFreeListFirst(allocKind)));
   1.474 +}
   1.475 +
   1.476 +void
   1.477 +MacroAssembler::newGCThing(Register result, Register temp, JSObject *templateObject, Label *fail,
   1.478 +                           gc::InitialHeap initialHeap)
   1.479 +{
   1.480 +    gc::AllocKind allocKind = templateObject->tenuredGetAllocKind();
   1.481 +    JS_ASSERT(allocKind >= gc::FINALIZE_OBJECT0 && allocKind <= gc::FINALIZE_OBJECT_LAST);
   1.482 +
   1.483 +    newGCThing(result, temp, allocKind, fail, initialHeap);
   1.484 +}
   1.485 +
   1.486 +void
   1.487 +MacroAssembler::newGCString(Register result, Register temp, Label *fail)
   1.488 +{
   1.489 +    newGCThing(result, temp, js::gc::FINALIZE_STRING, fail);
   1.490 +}
   1.491 +
   1.492 +void
   1.493 +MacroAssembler::newGCFatInlineString(Register result, Register temp, Label *fail)
   1.494 +{
   1.495 +    newGCThing(result, temp, js::gc::FINALIZE_FAT_INLINE_STRING, fail);
   1.496 +}
   1.497 +
   1.498 +void
   1.499 +MacroAssembler::newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
   1.500 +                              gc::AllocKind allocKind, Label *fail)
   1.501 +{
   1.502 +    // Similar to ::newGCThing(), except that it allocates from a custom
   1.503 +    // Allocator in the ForkJoinContext*, rather than being hardcoded to the
   1.504 +    // compartment allocator.  This requires two temporary registers.
   1.505 +    //
   1.506 +    // Subtle: I wanted to reuse `result` for one of the temporaries, but the
   1.507 +    // register allocator was assigning it to the same register as `cx`.
    1.508 +    // We would then overwrite that register, which broke the OOL code.
   1.509 +
   1.510 +    uint32_t thingSize = (uint32_t)gc::Arena::thingSize(allocKind);
   1.511 +
   1.512 +    // Load the allocator:
   1.513 +    // tempReg1 = (Allocator*) forkJoinCx->allocator()
   1.514 +    loadPtr(Address(cx, ThreadSafeContext::offsetOfAllocator()),
   1.515 +            tempReg1);
   1.516 +
   1.517 +    // Get a pointer to the relevant free list:
   1.518 +    // tempReg1 = (FreeSpan*) &tempReg1->arenas.freeLists[(allocKind)]
   1.519 +    uint32_t offset = (offsetof(Allocator, arenas) +
   1.520 +                       js::gc::ArenaLists::getFreeListOffset(allocKind));
   1.521 +    addPtr(Imm32(offset), tempReg1);
   1.522 +
   1.523 +    // Load first item on the list
   1.524 +    // tempReg2 = tempReg1->first
   1.525 +    loadPtr(Address(tempReg1, offsetof(gc::FreeSpan, first)), tempReg2);
   1.526 +
   1.527 +    // Check whether list is empty
   1.528 +    // if tempReg1->last <= tempReg2, fail
   1.529 +    branchPtr(Assembler::BelowOrEqual,
   1.530 +              Address(tempReg1, offsetof(gc::FreeSpan, last)),
   1.531 +              tempReg2,
   1.532 +              fail);
   1.533 +
   1.534 +    // If not, take first and advance pointer by thingSize bytes.
   1.535 +    // result = tempReg2;
   1.536 +    // tempReg2 += thingSize;
   1.537 +    movePtr(tempReg2, result);
   1.538 +    addPtr(Imm32(thingSize), tempReg2);
   1.539 +
   1.540 +    // Update `first`
   1.541 +    // tempReg1->first = tempReg2;
   1.542 +    storePtr(tempReg2, Address(tempReg1, offsetof(gc::FreeSpan, first)));
   1.543 +}
   1.544 +
   1.545 +void
   1.546 +MacroAssembler::newGCThingPar(Register result, Register cx, Register tempReg1, Register tempReg2,
   1.547 +                              JSObject *templateObject, Label *fail)
   1.548 +{
   1.549 +    gc::AllocKind allocKind = templateObject->tenuredGetAllocKind();
   1.550 +    JS_ASSERT(allocKind >= gc::FINALIZE_OBJECT0 && allocKind <= gc::FINALIZE_OBJECT_LAST);
   1.551 +
   1.552 +    newGCThingPar(result, cx, tempReg1, tempReg2, allocKind, fail);
   1.553 +}
   1.554 +
   1.555 +void
   1.556 +MacroAssembler::newGCStringPar(Register result, Register cx, Register tempReg1, Register tempReg2,
   1.557 +                               Label *fail)
   1.558 +{
   1.559 +    newGCThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_STRING, fail);
   1.560 +}
   1.561 +
   1.562 +void
   1.563 +MacroAssembler::newGCFatInlineStringPar(Register result, Register cx, Register tempReg1,
   1.564 +                                        Register tempReg2, Label *fail)
   1.565 +{
   1.566 +    newGCThingPar(result, cx, tempReg1, tempReg2, js::gc::FINALIZE_FAT_INLINE_STRING, fail);
   1.567 +}
   1.568 +
   1.569 +void
   1.570 +MacroAssembler::copySlotsFromTemplate(Register obj, Register temp, const JSObject *templateObj,
   1.571 +                                      uint32_t start, uint32_t end)
   1.572 +{
   1.573 +    uint32_t nfixed = Min(templateObj->numFixedSlots(), end);
   1.574 +    for (unsigned i = start; i < nfixed; i++)
   1.575 +        storeValue(templateObj->getFixedSlot(i), Address(obj, JSObject::getFixedSlotOffset(i)));
   1.576 +}
   1.577 +
   1.578 +void
   1.579 +MacroAssembler::fillSlotsWithUndefined(Register obj, Register temp, const JSObject *templateObj,
   1.580 +                                       uint32_t start, uint32_t end)
   1.581 +{
   1.582 +#ifdef JS_NUNBOX32
    1.583 +    // We only have a single spare register, so do the initialization as two
    1.584 +    // strided passes, writing first the tags and then the payloads.
   1.585 +    jsval_layout jv = JSVAL_TO_IMPL(UndefinedValue());
   1.586 +    uint32_t nfixed = Min(templateObj->numFixedSlots(), end);
   1.587 +
   1.588 +    mov(ImmWord(jv.s.tag), temp);
   1.589 +    for (unsigned i = start; i < nfixed; i++)
   1.590 +        store32(temp, ToType(Address(obj, JSObject::getFixedSlotOffset(i))));
   1.591 +
   1.592 +    mov(ImmWord(jv.s.payload.i32), temp);
   1.593 +    for (unsigned i = start; i < nfixed; i++)
   1.594 +        store32(temp, ToPayload(Address(obj, JSObject::getFixedSlotOffset(i))));
   1.595 +#else
   1.596 +    moveValue(UndefinedValue(), temp);
   1.597 +    uint32_t nfixed = Min(templateObj->numFixedSlots(), end);
   1.598 +    for (unsigned i = start; i < nfixed; i++)
   1.599 +        storePtr(temp, Address(obj, JSObject::getFixedSlotOffset(i)));
   1.600 +#endif
   1.601 +}
   1.602 +
   1.603 +static uint32_t
   1.604 +FindStartOfUndefinedSlots(JSObject *templateObj, uint32_t nslots)
   1.605 +{
   1.606 +    JS_ASSERT(nslots == templateObj->lastProperty()->slotSpan(templateObj->getClass()));
   1.607 +    JS_ASSERT(nslots > 0);
   1.608 +    for (uint32_t first = nslots; first != 0; --first) {
   1.609 +        if (templateObj->getSlot(first - 1) != UndefinedValue())
   1.610 +            return first;
   1.611 +    }
   1.612 +    return 0;
   1.613 +}
   1.614 +
   1.615 +void
   1.616 +MacroAssembler::initGCSlots(Register obj, Register temp, JSObject *templateObj)
   1.617 +{
   1.618 +    // Slots of non-array objects are required to be initialized.
   1.619 +    // Use the values currently in the template object.
   1.620 +    uint32_t nslots = templateObj->lastProperty()->slotSpan(templateObj->getClass());
   1.621 +    if (nslots == 0)
   1.622 +        return;
   1.623 +
   1.624 +    // Attempt to group slot writes such that we minimize the amount of
   1.625 +    // duplicated data we need to embed in code and load into registers. In
   1.626 +    // general, most template object slots will be undefined except for any
   1.627 +    // reserved slots. Since reserved slots come first, we split the object
   1.628 +    // logically into independent non-UndefinedValue writes to the head and
   1.629 +    // duplicated writes of UndefinedValue to the tail. For the majority of
   1.630 +    // objects, the "tail" will be the entire slot range.
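          +    //
          +    // For example, a template object whose four slots hold two reserved
          +    // (non-undefined) values followed by two undefined values gets two stores
          +    // from copySlotsFromTemplate() and then a fillSlotsWithUndefined() over
          +    // slots 2..3.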
   1.631 +    uint32_t startOfUndefined = FindStartOfUndefinedSlots(templateObj, nslots);
   1.632 +    copySlotsFromTemplate(obj, temp, templateObj, 0, startOfUndefined);
   1.633 +    fillSlotsWithUndefined(obj, temp, templateObj, startOfUndefined, nslots);
   1.634 +}
   1.635 +
   1.636 +void
   1.637 +MacroAssembler::initGCThing(Register obj, Register temp, JSObject *templateObj)
   1.638 +{
   1.639 +    // Fast initialization of an empty object returned by NewGCThing().
   1.640 +
   1.641 +    JS_ASSERT(!templateObj->hasDynamicElements());
   1.642 +
   1.643 +    storePtr(ImmGCPtr(templateObj->lastProperty()), Address(obj, JSObject::offsetOfShape()));
   1.644 +    storePtr(ImmGCPtr(templateObj->type()), Address(obj, JSObject::offsetOfType()));
   1.645 +    storePtr(ImmPtr(nullptr), Address(obj, JSObject::offsetOfSlots()));
   1.646 +
   1.647 +    if (templateObj->is<ArrayObject>()) {
   1.648 +        JS_ASSERT(!templateObj->getDenseInitializedLength());
   1.649 +
   1.650 +        int elementsOffset = JSObject::offsetOfFixedElements();
   1.651 +
   1.652 +        computeEffectiveAddress(Address(obj, elementsOffset), temp);
   1.653 +        storePtr(temp, Address(obj, JSObject::offsetOfElements()));
   1.654 +
   1.655 +        // Fill in the elements header.
   1.656 +        store32(Imm32(templateObj->getDenseCapacity()),
   1.657 +                Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
   1.658 +        store32(Imm32(templateObj->getDenseInitializedLength()),
   1.659 +                Address(obj, elementsOffset + ObjectElements::offsetOfInitializedLength()));
   1.660 +        store32(Imm32(templateObj->as<ArrayObject>().length()),
   1.661 +                Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
   1.662 +        store32(Imm32(templateObj->shouldConvertDoubleElements()
   1.663 +                      ? ObjectElements::CONVERT_DOUBLE_ELEMENTS
   1.664 +                      : 0),
   1.665 +                Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
   1.666 +        JS_ASSERT(!templateObj->hasPrivate());
   1.667 +    } else {
   1.668 +        storePtr(ImmPtr(emptyObjectElements), Address(obj, JSObject::offsetOfElements()));
   1.669 +
   1.670 +        initGCSlots(obj, temp, templateObj);
   1.671 +
   1.672 +        if (templateObj->hasPrivate()) {
   1.673 +            uint32_t nfixed = templateObj->numFixedSlots();
   1.674 +            storePtr(ImmPtr(templateObj->getPrivate()),
   1.675 +                     Address(obj, JSObject::getPrivateDataOffset(nfixed)));
   1.676 +        }
   1.677 +    }
   1.678 +}
   1.679 +
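          +// Compare two strings for (in)equality without a VM call where possible:
          +// pointer-equal strings, pairs of atoms, and strings of different lengths are
          +// all decided inline; any other pair jumps to |fail| so the caller can take
          +// the OOL path. |op| must be an equality op.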
   1.680 +void
   1.681 +MacroAssembler::compareStrings(JSOp op, Register left, Register right, Register result,
   1.682 +                               Register temp, Label *fail)
   1.683 +{
   1.684 +    JS_ASSERT(IsEqualityOp(op));
   1.685 +
   1.686 +    Label done;
   1.687 +    Label notPointerEqual;
   1.688 +    // Fast path for identical strings.
   1.689 +    branchPtr(Assembler::NotEqual, left, right, &notPointerEqual);
   1.690 +    move32(Imm32(op == JSOP_EQ || op == JSOP_STRICTEQ), result);
   1.691 +    jump(&done);
   1.692 +
   1.693 +    bind(&notPointerEqual);
   1.694 +    loadPtr(Address(left, JSString::offsetOfLengthAndFlags()), result);
   1.695 +    loadPtr(Address(right, JSString::offsetOfLengthAndFlags()), temp);
   1.696 +
   1.697 +    Label notAtom;
   1.698 +    // Optimize the equality operation to a pointer compare for two atoms.
   1.699 +    Imm32 atomBit(JSString::ATOM_BIT);
   1.700 +    branchTest32(Assembler::Zero, result, atomBit, &notAtom);
   1.701 +    branchTest32(Assembler::Zero, temp, atomBit, &notAtom);
   1.702 +
   1.703 +    cmpPtrSet(JSOpToCondition(MCompare::Compare_String, op), left, right, result);
   1.704 +    jump(&done);
   1.705 +
   1.706 +    bind(&notAtom);
   1.707 +    // Strings of different length can never be equal.
   1.708 +    rshiftPtr(Imm32(JSString::LENGTH_SHIFT), result);
   1.709 +    rshiftPtr(Imm32(JSString::LENGTH_SHIFT), temp);
   1.710 +    branchPtr(Assembler::Equal, result, temp, fail);
   1.711 +    move32(Imm32(op == JSOP_NE || op == JSOP_STRICTNE), result);
   1.712 +
   1.713 +    bind(&done);
   1.714 +}
   1.715 +
   1.716 +void
   1.717 +MacroAssembler::checkInterruptFlagPar(Register tempReg, Label *fail)
   1.718 +{
   1.719 +#ifdef JS_THREADSAFE
   1.720 +    movePtr(ImmPtr(GetIonContext()->runtime->addressOfInterruptPar()), tempReg);
   1.721 +    branch32(Assembler::NonZero, Address(tempReg, 0), Imm32(0), fail);
   1.722 +#else
   1.723 +    MOZ_ASSUME_UNREACHABLE("JSRuntime::interruptPar doesn't exist on non-threadsafe builds.");
   1.724 +#endif
   1.725 +}
   1.726 +
   1.727 +static void
   1.728 +ReportOverRecursed(JSContext *cx)
   1.729 +{
   1.730 +    js_ReportOverRecursed(cx);
   1.731 +}
   1.732 +
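          +// Shared tail for all bailouts: dispatch on the return code from the bailout
          +// handler, report over-recursion or propagate the exception as needed, then
          +// rebuild the baseline frame described by |bailoutInfo| and resume either
          +// through a type-monitoring IC chain or directly into baseline jitcode.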
   1.733 +void
   1.734 +MacroAssembler::generateBailoutTail(Register scratch, Register bailoutInfo)
   1.735 +{
   1.736 +    enterExitFrame();
   1.737 +
   1.738 +    Label baseline;
   1.739 +
   1.740 +    // The return value from Bailout is tagged as:
   1.741 +    // - 0x0: done (enter baseline)
   1.742 +    // - 0x1: error (handle exception)
   1.743 +    // - 0x2: overrecursed
   1.744 +    JS_STATIC_ASSERT(BAILOUT_RETURN_OK == 0);
   1.745 +    JS_STATIC_ASSERT(BAILOUT_RETURN_FATAL_ERROR == 1);
   1.746 +    JS_STATIC_ASSERT(BAILOUT_RETURN_OVERRECURSED == 2);
   1.747 +
   1.748 +    branch32(Equal, ReturnReg, Imm32(BAILOUT_RETURN_OK), &baseline);
   1.749 +    branch32(Equal, ReturnReg, Imm32(BAILOUT_RETURN_FATAL_ERROR), exceptionLabel());
   1.750 +
   1.751 +    // Fall-through: overrecursed.
   1.752 +    {
   1.753 +        loadJSContext(ReturnReg);
   1.754 +        setupUnalignedABICall(1, scratch);
   1.755 +        passABIArg(ReturnReg);
   1.756 +        callWithABI(JS_FUNC_TO_DATA_PTR(void *, ReportOverRecursed));
   1.757 +        jump(exceptionLabel());
   1.758 +    }
   1.759 +
   1.760 +    bind(&baseline);
   1.761 +    {
   1.762 +        // Prepare a register set for use in this case.
   1.763 +        GeneralRegisterSet regs(GeneralRegisterSet::All());
   1.764 +        JS_ASSERT(!regs.has(BaselineStackReg));
   1.765 +        regs.take(bailoutInfo);
   1.766 +
   1.767 +        // Reset SP to the point where clobbering starts.
   1.768 +        loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)),
   1.769 +                BaselineStackReg);
   1.770 +
   1.771 +        Register copyCur = regs.takeAny();
   1.772 +        Register copyEnd = regs.takeAny();
   1.773 +        Register temp = regs.takeAny();
   1.774 +
   1.775 +        // Copy data onto stack.
   1.776 +        loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)), copyCur);
   1.777 +        loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)), copyEnd);
   1.778 +        {
   1.779 +            Label copyLoop;
   1.780 +            Label endOfCopy;
   1.781 +            bind(&copyLoop);
   1.782 +            branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
   1.783 +            subPtr(Imm32(4), copyCur);
   1.784 +            subPtr(Imm32(4), BaselineStackReg);
   1.785 +            load32(Address(copyCur, 0), temp);
   1.786 +            store32(temp, Address(BaselineStackReg, 0));
   1.787 +            jump(&copyLoop);
   1.788 +            bind(&endOfCopy);
   1.789 +        }
   1.790 +
   1.791 +        // Enter exit frame for the FinishBailoutToBaseline call.
   1.792 +        loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)), temp);
   1.793 +        load32(Address(temp, BaselineFrame::reverseOffsetOfFrameSize()), temp);
   1.794 +        makeFrameDescriptor(temp, JitFrame_BaselineJS);
   1.795 +        push(temp);
   1.796 +        push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
   1.797 +        enterFakeExitFrame();
   1.798 +
   1.799 +        // If monitorStub is non-null, handle resumeAddr appropriately.
   1.800 +        Label noMonitor;
   1.801 +        Label done;
   1.802 +        branchPtr(Assembler::Equal,
   1.803 +                  Address(bailoutInfo, offsetof(BaselineBailoutInfo, monitorStub)),
   1.804 +                  ImmPtr(nullptr),
   1.805 +                  &noMonitor);
   1.806 +
   1.807 +        //
   1.808 +        // Resuming into a monitoring stub chain.
   1.809 +        //
   1.810 +        {
   1.811 +            // Save needed values onto stack temporarily.
   1.812 +            pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR0)));
   1.813 +            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
   1.814 +            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
   1.815 +            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, monitorStub)));
   1.816 +
   1.817 +            // Call a stub to free allocated memory and create arguments objects.
   1.818 +            setupUnalignedABICall(1, temp);
   1.819 +            passABIArg(bailoutInfo);
   1.820 +            callWithABI(JS_FUNC_TO_DATA_PTR(void *, FinishBailoutToBaseline));
   1.821 +            branchTest32(Zero, ReturnReg, ReturnReg, exceptionLabel());
   1.822 +
   1.823 +            // Restore values where they need to be and resume execution.
   1.824 +            GeneralRegisterSet enterMonRegs(GeneralRegisterSet::All());
   1.825 +            enterMonRegs.take(R0);
   1.826 +            enterMonRegs.take(BaselineStubReg);
   1.827 +            enterMonRegs.take(BaselineFrameReg);
   1.828 +            enterMonRegs.takeUnchecked(BaselineTailCallReg);
   1.829 +
   1.830 +            pop(BaselineStubReg);
   1.831 +            pop(BaselineTailCallReg);
   1.832 +            pop(BaselineFrameReg);
   1.833 +            popValue(R0);
   1.834 +
   1.835 +            // Discard exit frame.
   1.836 +            addPtr(Imm32(IonExitFrameLayout::SizeWithFooter()), StackPointer);
   1.837 +
   1.838 +#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
   1.839 +            push(BaselineTailCallReg);
   1.840 +#endif
   1.841 +            jump(Address(BaselineStubReg, ICStub::offsetOfStubCode()));
   1.842 +        }
   1.843 +
   1.844 +        //
   1.845 +        // Resuming into main jitcode.
   1.846 +        //
   1.847 +        bind(&noMonitor);
   1.848 +        {
   1.849 +            // Save needed values onto stack temporarily.
   1.850 +            pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR0)));
   1.851 +            pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR1)));
   1.852 +            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
   1.853 +            push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
   1.854 +
   1.855 +            // Call a stub to free allocated memory and create arguments objects.
   1.856 +            setupUnalignedABICall(1, temp);
   1.857 +            passABIArg(bailoutInfo);
   1.858 +            callWithABI(JS_FUNC_TO_DATA_PTR(void *, FinishBailoutToBaseline));
   1.859 +            branchTest32(Zero, ReturnReg, ReturnReg, exceptionLabel());
   1.860 +
   1.861 +            // Restore values where they need to be and resume execution.
   1.862 +            GeneralRegisterSet enterRegs(GeneralRegisterSet::All());
   1.863 +            enterRegs.take(R0);
   1.864 +            enterRegs.take(R1);
   1.865 +            enterRegs.take(BaselineFrameReg);
   1.866 +            Register jitcodeReg = enterRegs.takeAny();
   1.867 +
   1.868 +            pop(jitcodeReg);
   1.869 +            pop(BaselineFrameReg);
   1.870 +            popValue(R1);
   1.871 +            popValue(R0);
   1.872 +
   1.873 +            // Discard exit frame.
   1.874 +            addPtr(Imm32(IonExitFrameLayout::SizeWithFooter()), StackPointer);
   1.875 +
   1.876 +            jump(jitcodeReg);
   1.877 +        }
   1.878 +    }
   1.879 +}
   1.880 +
   1.881 +void
   1.882 +MacroAssembler::loadBaselineOrIonRaw(Register script, Register dest, ExecutionMode mode,
   1.883 +                                     Label *failure)
   1.884 +{
   1.885 +    if (mode == SequentialExecution) {
   1.886 +        loadPtr(Address(script, JSScript::offsetOfBaselineOrIonRaw()), dest);
   1.887 +        if (failure)
   1.888 +            branchTestPtr(Assembler::Zero, dest, dest, failure);
   1.889 +    } else {
   1.890 +        loadPtr(Address(script, JSScript::offsetOfParallelIonScript()), dest);
   1.891 +        if (failure)
   1.892 +            branchPtr(Assembler::BelowOrEqual, dest, ImmPtr(ION_COMPILING_SCRIPT), failure);
   1.893 +        loadPtr(Address(dest, IonScript::offsetOfMethod()), dest);
   1.894 +        loadPtr(Address(dest, JitCode::offsetOfCode()), dest);
   1.895 +    }
   1.896 +}
   1.897 +
   1.898 +void
   1.899 +MacroAssembler::loadBaselineOrIonNoArgCheck(Register script, Register dest, ExecutionMode mode,
   1.900 +                                            Label *failure)
   1.901 +{
   1.902 +    if (mode == SequentialExecution) {
   1.903 +        loadPtr(Address(script, JSScript::offsetOfBaselineOrIonSkipArgCheck()), dest);
   1.904 +        if (failure)
   1.905 +            branchTestPtr(Assembler::Zero, dest, dest, failure);
   1.906 +    } else {
    1.907 +        // Find a second register to hold the offset of the no-arg-check entry.
   1.908 +        Register offset = script;
   1.909 +        if (script == dest) {
   1.910 +            GeneralRegisterSet regs(GeneralRegisterSet::All());
   1.911 +            regs.take(dest);
   1.912 +            offset = regs.takeAny();
   1.913 +        }
   1.914 +
   1.915 +        loadPtr(Address(script, JSScript::offsetOfParallelIonScript()), dest);
   1.916 +        if (failure)
   1.917 +            branchPtr(Assembler::BelowOrEqual, dest, ImmPtr(ION_COMPILING_SCRIPT), failure);
   1.918 +
   1.919 +        Push(offset);
   1.920 +        load32(Address(script, IonScript::offsetOfSkipArgCheckEntryOffset()), offset);
   1.921 +
   1.922 +        loadPtr(Address(dest, IonScript::offsetOfMethod()), dest);
   1.923 +        loadPtr(Address(dest, JitCode::offsetOfCode()), dest);
   1.924 +        addPtr(offset, dest);
   1.925 +
   1.926 +        Pop(offset);
   1.927 +    }
   1.928 +}
   1.929 +
   1.930 +void
   1.931 +MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest)
   1.932 +{
   1.933 +    if (framePtr != dest)
   1.934 +        movePtr(framePtr, dest);
   1.935 +    subPtr(Imm32(BaselineFrame::Size()), dest);
   1.936 +}
   1.937 +
   1.938 +void
   1.939 +MacroAssembler::loadForkJoinContext(Register cx, Register scratch)
   1.940 +{
   1.941 +    // Load the current ForkJoinContext *. If we need a parallel exit frame,
    1.942 +    // chances are we are about to do something very slow anyway, so just
   1.943 +    // call ForkJoinContextPar again instead of using the cached version.
   1.944 +    setupUnalignedABICall(0, scratch);
   1.945 +    callWithABI(JS_FUNC_TO_DATA_PTR(void *, ForkJoinContextPar));
   1.946 +    if (ReturnReg != cx)
   1.947 +        movePtr(ReturnReg, cx);
   1.948 +}
   1.949 +
   1.950 +void
   1.951 +MacroAssembler::loadContext(Register cxReg, Register scratch, ExecutionMode executionMode)
   1.952 +{
   1.953 +    switch (executionMode) {
   1.954 +      case SequentialExecution:
   1.955 +        // The scratch register is not used for sequential execution.
   1.956 +        loadJSContext(cxReg);
   1.957 +        break;
   1.958 +      case ParallelExecution:
   1.959 +        loadForkJoinContext(cxReg, scratch);
   1.960 +        break;
   1.961 +      default:
   1.962 +        MOZ_ASSUME_UNREACHABLE("No such execution mode");
   1.963 +    }
   1.964 +}
   1.965 +
   1.966 +void
   1.967 +MacroAssembler::enterParallelExitFrameAndLoadContext(const VMFunction *f, Register cx,
   1.968 +                                                     Register scratch)
   1.969 +{
   1.970 +    loadForkJoinContext(cx, scratch);
    1.971 +    // Load the PerThreadData from the cx.
   1.972 +    loadPtr(Address(cx, offsetof(ForkJoinContext, perThreadData)), scratch);
   1.973 +    linkParallelExitFrame(scratch);
   1.974 +    // Push the ioncode.
   1.975 +    exitCodePatch_ = PushWithPatch(ImmWord(-1));
   1.976 +    // Push the VMFunction pointer, to mark arguments.
   1.977 +    Push(ImmPtr(f));
   1.978 +}
   1.979 +
   1.980 +void
   1.981 +MacroAssembler::enterFakeParallelExitFrame(Register cx, Register scratch,
   1.982 +                                           JitCode *codeVal)
   1.983 +{
    1.984 +    // Load the PerThreadData from the cx.
   1.985 +    loadPtr(Address(cx, offsetof(ForkJoinContext, perThreadData)), scratch);
   1.986 +    linkParallelExitFrame(scratch);
   1.987 +    Push(ImmPtr(codeVal));
   1.988 +    Push(ImmPtr(nullptr));
   1.989 +}
   1.990 +
   1.991 +void
   1.992 +MacroAssembler::enterExitFrameAndLoadContext(const VMFunction *f, Register cxReg, Register scratch,
   1.993 +                                             ExecutionMode executionMode)
   1.994 +{
   1.995 +    switch (executionMode) {
   1.996 +      case SequentialExecution:
   1.997 +        // The scratch register is not used for sequential execution.
   1.998 +        enterExitFrame(f);
   1.999 +        loadJSContext(cxReg);
  1.1000 +        break;
  1.1001 +      case ParallelExecution:
  1.1002 +        enterParallelExitFrameAndLoadContext(f, cxReg, scratch);
  1.1003 +        break;
  1.1004 +      default:
  1.1005 +        MOZ_ASSUME_UNREACHABLE("No such execution mode");
  1.1006 +    }
  1.1007 +}
  1.1008 +
  1.1009 +void
  1.1010 +MacroAssembler::enterFakeExitFrame(Register cxReg, Register scratch,
  1.1011 +                                   ExecutionMode executionMode,
  1.1012 +                                   JitCode *codeVal)
  1.1013 +{
  1.1014 +    switch (executionMode) {
  1.1015 +      case SequentialExecution:
  1.1016 +        // The cx and scratch registers are not used for sequential execution.
  1.1017 +        enterFakeExitFrame(codeVal);
  1.1018 +        break;
  1.1019 +      case ParallelExecution:
  1.1020 +        enterFakeParallelExitFrame(cxReg, scratch, codeVal);
  1.1021 +        break;
  1.1022 +      default:
  1.1023 +        MOZ_ASSUME_UNREACHABLE("No such execution mode");
  1.1024 +    }
  1.1025 +}
  1.1026 +
  1.1027 +void
  1.1028 +MacroAssembler::handleFailure(ExecutionMode executionMode)
  1.1029 +{
  1.1030 +    // Re-entry code is irrelevant because the exception will leave the
   1.1031 +    // running function and never come back.
  1.1032 +    if (sps_)
  1.1033 +        sps_->skipNextReenter();
  1.1034 +    leaveSPSFrame();
  1.1035 +
  1.1036 +    void *handler;
  1.1037 +    switch (executionMode) {
  1.1038 +      case SequentialExecution:
  1.1039 +        handler = JS_FUNC_TO_DATA_PTR(void *, jit::HandleException);
  1.1040 +        break;
  1.1041 +      case ParallelExecution:
  1.1042 +        handler = JS_FUNC_TO_DATA_PTR(void *, jit::HandleParallelFailure);
  1.1043 +        break;
  1.1044 +      default:
  1.1045 +        MOZ_ASSUME_UNREACHABLE("No such execution mode");
  1.1046 +    }
  1.1047 +    MacroAssemblerSpecific::handleFailureWithHandler(handler);
  1.1048 +
   1.1049 +    // Doesn't actually emit code, but balances the leaveSPSFrame() above.
  1.1050 +    if (sps_)
  1.1051 +        sps_->reenter(*this, InvalidReg);
  1.1052 +}
  1.1053 +
  1.1054 +#ifdef DEBUG
  1.1055 +static inline bool
  1.1056 +IsCompilingAsmJS()
  1.1057 +{
  1.1058 +    // asm.js compilation pushes an IonContext with a null JSCompartment.
  1.1059 +    IonContext *ictx = MaybeGetIonContext();
  1.1060 +    return ictx && ictx->compartment == nullptr;
  1.1061 +}
  1.1062 +
  1.1063 +static void
  1.1064 +AssumeUnreachable_(const char *output) {
  1.1065 +    MOZ_ReportAssertionFailure(output, __FILE__, __LINE__);
  1.1066 +}
  1.1067 +#endif
  1.1068 +
  1.1069 +void
  1.1070 +MacroAssembler::assumeUnreachable(const char *output)
  1.1071 +{
  1.1072 +#ifdef DEBUG
  1.1073 +    RegisterSet regs = RegisterSet::Volatile();
  1.1074 +    PushRegsInMask(regs);
  1.1075 +    Register temp = regs.takeGeneral();
  1.1076 +
   1.1077 +    // With ASLR, we can't rely on 'output' pointing to the same char array
   1.1078 +    // after serialization/deserialization, and supporting that is not
   1.1079 +    // possible until we modify AsmJSImmPtr and the underlying "patching"
   1.1080 +    // mechanism.
  1.1081 +    if (IsCompilingAsmJS()) {
  1.1082 +        setupUnalignedABICall(0, temp);
  1.1083 +        callWithABINoProfiling(AsmJSImm_AssumeUnreachable);
  1.1084 +    } else {
  1.1085 +        setupUnalignedABICall(1, temp);
  1.1086 +        movePtr(ImmPtr(output), temp);
  1.1087 +        passABIArg(temp);
  1.1088 +        callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, AssumeUnreachable_));
  1.1089 +    }
  1.1090 +    PopRegsInMask(RegisterSet::Volatile());
  1.1091 +#endif
  1.1092 +
  1.1093 +    breakpoint();
  1.1094 +}
  1.1095 +
  1.1096 +static void
  1.1097 +Printf0_(const char *output) {
  1.1098 +    printf("%s", output);
  1.1099 +}
  1.1100 +
  1.1101 +void
  1.1102 +MacroAssembler::printf(const char *output)
  1.1103 +{
  1.1104 +    RegisterSet regs = RegisterSet::Volatile();
  1.1105 +    PushRegsInMask(regs);
  1.1106 +
  1.1107 +    Register temp = regs.takeGeneral();
  1.1108 +
  1.1109 +    setupUnalignedABICall(1, temp);
  1.1110 +    movePtr(ImmPtr(output), temp);
  1.1111 +    passABIArg(temp);
  1.1112 +    callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, Printf0_));
  1.1113 +
  1.1114 +    PopRegsInMask(RegisterSet::Volatile());
  1.1115 +}
  1.1116 +
  1.1117 +static void
  1.1118 +Printf1_(const char *output, uintptr_t value) {
   1.1119 +    char *line = JS_sprintf_append(nullptr, output, value);
          +    if (!line)
          +        return;  // JS_sprintf_append returns nullptr on OOM.
   1.1120 +    printf("%s", line);
   1.1121 +    js_free(line);
  1.1122 +}
  1.1123 +
  1.1124 +void
  1.1125 +MacroAssembler::printf(const char *output, Register value)
  1.1126 +{
  1.1127 +    RegisterSet regs = RegisterSet::Volatile();
  1.1128 +    PushRegsInMask(regs);
  1.1129 +
  1.1130 +    regs.takeUnchecked(value);
  1.1131 +
  1.1132 +    Register temp = regs.takeGeneral();
  1.1133 +
  1.1134 +    setupUnalignedABICall(2, temp);
  1.1135 +    movePtr(ImmPtr(output), temp);
  1.1136 +    passABIArg(temp);
  1.1137 +    passABIArg(value);
  1.1138 +    callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, Printf1_));
  1.1139 +
  1.1140 +    PopRegsInMask(RegisterSet::Volatile());
  1.1141 +}
  1.1142 +
  1.1143 +#ifdef JS_TRACE_LOGGING
  1.1144 +void
  1.1145 +MacroAssembler::tracelogStart(Register logger, uint32_t textId)
  1.1146 +{
  1.1147 +    void (&TraceLogFunc)(TraceLogger*, uint32_t) = TraceLogStartEvent;
  1.1148 +
  1.1149 +    PushRegsInMask(RegisterSet::Volatile());
  1.1150 +
  1.1151 +    RegisterSet regs = RegisterSet::Volatile();
  1.1152 +    regs.takeUnchecked(logger);
  1.1153 +
  1.1154 +    Register temp = regs.takeGeneral();
  1.1155 +
  1.1156 +    setupUnalignedABICall(2, temp);
  1.1157 +    passABIArg(logger);
  1.1158 +    move32(Imm32(textId), temp);
  1.1159 +    passABIArg(temp);
  1.1160 +    callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, TraceLogFunc));
  1.1161 +
  1.1162 +    PopRegsInMask(RegisterSet::Volatile());
  1.1163 +}
  1.1164 +
  1.1165 +void
  1.1166 +MacroAssembler::tracelogStart(Register logger, Register textId)
  1.1167 +{
  1.1168 +    void (&TraceLogFunc)(TraceLogger*, uint32_t) = TraceLogStartEvent;
  1.1169 +
  1.1170 +    PushRegsInMask(RegisterSet::Volatile());
  1.1171 +
  1.1172 +    RegisterSet regs = RegisterSet::Volatile();
  1.1173 +    regs.takeUnchecked(logger);
  1.1174 +    regs.takeUnchecked(textId);
  1.1175 +
  1.1176 +    Register temp = regs.takeGeneral();
  1.1177 +
  1.1178 +    setupUnalignedABICall(2, temp);
  1.1179 +    passABIArg(logger);
  1.1180 +    passABIArg(textId);
  1.1181 +    callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, TraceLogFunc));
  1.1182 +
  1.1183 +    regs.add(temp);
  1.1184 +
  1.1185 +    PopRegsInMask(RegisterSet::Volatile());
  1.1186 +}
  1.1187 +
  1.1188 +void
  1.1189 +MacroAssembler::tracelogStop(Register logger, uint32_t textId)
  1.1190 +{
  1.1191 +    void (&TraceLogFunc)(TraceLogger*, uint32_t) = TraceLogStopEvent;
  1.1192 +
  1.1193 +    PushRegsInMask(RegisterSet::Volatile());
  1.1194 +
  1.1195 +    RegisterSet regs = RegisterSet::Volatile();
  1.1196 +    regs.takeUnchecked(logger);
  1.1197 +
  1.1198 +    Register temp = regs.takeGeneral();
  1.1199 +
  1.1200 +    setupUnalignedABICall(2, temp);
  1.1201 +    passABIArg(logger);
  1.1202 +    move32(Imm32(textId), temp);
  1.1203 +    passABIArg(temp);
  1.1204 +    callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, TraceLogFunc));
  1.1205 +
  1.1206 +    regs.add(temp);
  1.1207 +
  1.1208 +    PopRegsInMask(RegisterSet::Volatile());
  1.1209 +}
  1.1210 +
  1.1211 +void
  1.1212 +MacroAssembler::tracelogStop(Register logger, Register textId)
  1.1213 +{
  1.1214 +#ifdef DEBUG
  1.1215 +    void (&TraceLogFunc)(TraceLogger*, uint32_t) = TraceLogStopEvent;
  1.1216 +
  1.1217 +    PushRegsInMask(RegisterSet::Volatile());
  1.1218 +
  1.1219 +    RegisterSet regs = RegisterSet::Volatile();
  1.1220 +    regs.takeUnchecked(logger);
  1.1221 +    regs.takeUnchecked(textId);
  1.1222 +
  1.1223 +    Register temp = regs.takeGeneral();
  1.1224 +
  1.1225 +    setupUnalignedABICall(2, temp);
  1.1226 +    passABIArg(logger);
  1.1227 +    passABIArg(textId);
  1.1228 +    callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, TraceLogFunc));
  1.1229 +
  1.1230 +    regs.add(temp);
  1.1231 +
  1.1232 +    PopRegsInMask(RegisterSet::Volatile());
  1.1233 +#else
  1.1234 +    tracelogStop(logger);
  1.1235 +#endif
  1.1236 +}
  1.1237 +
  1.1238 +void
  1.1239 +MacroAssembler::tracelogStop(Register logger)
  1.1240 +{
  1.1241 +    void (&TraceLogFunc)(TraceLogger*) = TraceLogStopEvent;
  1.1242 +
  1.1243 +    PushRegsInMask(RegisterSet::Volatile());
  1.1244 +
  1.1245 +    RegisterSet regs = RegisterSet::Volatile();
  1.1246 +    regs.takeUnchecked(logger);
  1.1247 +
  1.1248 +    Register temp = regs.takeGeneral();
  1.1249 +
  1.1250 +    setupUnalignedABICall(1, temp);
  1.1251 +    passABIArg(logger);
  1.1252 +    callWithABINoProfiling(JS_FUNC_TO_DATA_PTR(void *, TraceLogFunc));
  1.1253 +
  1.1254 +    regs.add(temp);
  1.1255 +
  1.1256 +    PopRegsInMask(RegisterSet::Volatile());
  1.1257 +}
  1.1258 +#endif
  1.1259 +
  1.1260 +void
  1.1261 +MacroAssembler::convertInt32ValueToDouble(const Address &address, Register scratch, Label *done)
  1.1262 +{
  1.1263 +    branchTestInt32(Assembler::NotEqual, address, done);
  1.1264 +    unboxInt32(address, scratch);
  1.1265 +    convertInt32ToDouble(scratch, ScratchFloatReg);
  1.1266 +    storeDouble(ScratchFloatReg, address);
  1.1267 +}
  1.1268 +
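         +// Coerce a boxed Value to a double or float32 in |output|, following the
         +// usual ToNumber rules for primitives: undefined becomes NaN, null becomes
         +// +0, booleans and int32s are converted numerically, and doubles are
         +// unboxed (and narrowed when a Float32 result is requested). Anything else
         +// (strings, objects, ...) branches to |fail|.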
  1.1269 +void
  1.1270 +MacroAssembler::convertValueToFloatingPoint(ValueOperand value, FloatRegister output,
  1.1271 +                                            Label *fail, MIRType outputType)
  1.1272 +{
  1.1273 +    Register tag = splitTagForTest(value);
  1.1274 +
  1.1275 +    Label isDouble, isInt32, isBool, isNull, done;
  1.1276 +
  1.1277 +    branchTestDouble(Assembler::Equal, tag, &isDouble);
  1.1278 +    branchTestInt32(Assembler::Equal, tag, &isInt32);
  1.1279 +    branchTestBoolean(Assembler::Equal, tag, &isBool);
  1.1280 +    branchTestNull(Assembler::Equal, tag, &isNull);
  1.1281 +    branchTestUndefined(Assembler::NotEqual, tag, fail);
  1.1282 +
  1.1283 +    // fall-through: undefined
  1.1284 +    loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
  1.1285 +    jump(&done);
  1.1286 +
  1.1287 +    bind(&isNull);
  1.1288 +    loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
  1.1289 +    jump(&done);
  1.1290 +
  1.1291 +    bind(&isBool);
  1.1292 +    boolValueToFloatingPoint(value, output, outputType);
  1.1293 +    jump(&done);
  1.1294 +
  1.1295 +    bind(&isInt32);
  1.1296 +    int32ValueToFloatingPoint(value, output, outputType);
  1.1297 +    jump(&done);
  1.1298 +
  1.1299 +    bind(&isDouble);
  1.1300 +    unboxDouble(value, output);
  1.1301 +    if (outputType == MIRType_Float32)
  1.1302 +        convertDoubleToFloat32(output, output);
  1.1303 +    bind(&done);
  1.1304 +}
  1.1305 +
  1.1306 +bool
  1.1307 +MacroAssembler::convertValueToFloatingPoint(JSContext *cx, const Value &v, FloatRegister output,
  1.1308 +                                            Label *fail, MIRType outputType)
  1.1309 +{
  1.1310 +    if (v.isNumber() || v.isString()) {
  1.1311 +        double d;
  1.1312 +        if (v.isNumber())
  1.1313 +            d = v.toNumber();
  1.1314 +        else if (!StringToNumber(cx, v.toString(), &d))
  1.1315 +            return false;
  1.1316 +
  1.1317 +        loadConstantFloatingPoint(d, (float)d, output, outputType);
  1.1318 +        return true;
  1.1319 +    }
  1.1320 +
  1.1321 +    if (v.isBoolean()) {
  1.1322 +        if (v.toBoolean())
  1.1323 +            loadConstantFloatingPoint(1.0, 1.0f, output, outputType);
  1.1324 +        else
  1.1325 +            loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
  1.1326 +        return true;
  1.1327 +    }
  1.1328 +
  1.1329 +    if (v.isNull()) {
  1.1330 +        loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
  1.1331 +        return true;
  1.1332 +    }
  1.1333 +
  1.1334 +    if (v.isUndefined()) {
  1.1335 +        loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
  1.1336 +        return true;
  1.1337 +    }
  1.1338 +
  1.1339 +    JS_ASSERT(v.isObject());
  1.1340 +    jump(fail);
  1.1341 +    return true;
  1.1342 +}
  1.1343 +
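         +// Push a dummy rooted slot of the given kind: pointer-like root types get
         +// a null cell, RootValue gets UndefinedValue(). popRooted() below pops the
         +// slot back into the matching register.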
  1.1344 +void
  1.1345 +MacroAssembler::PushEmptyRooted(VMFunction::RootType rootType)
  1.1346 +{
  1.1347 +    switch (rootType) {
  1.1348 +      case VMFunction::RootNone:
  1.1349 +        MOZ_ASSUME_UNREACHABLE("Handle must have root type");
  1.1350 +      case VMFunction::RootObject:
  1.1351 +      case VMFunction::RootString:
  1.1352 +      case VMFunction::RootPropertyName:
  1.1353 +      case VMFunction::RootFunction:
  1.1354 +      case VMFunction::RootCell:
  1.1355 +        Push(ImmPtr(nullptr));
  1.1356 +        break;
  1.1357 +      case VMFunction::RootValue:
  1.1358 +        Push(UndefinedValue());
  1.1359 +        break;
  1.1360 +    }
  1.1361 +}
  1.1362 +
  1.1363 +void
  1.1364 +MacroAssembler::popRooted(VMFunction::RootType rootType, Register cellReg,
  1.1365 +                          const ValueOperand &valueReg)
  1.1366 +{
  1.1367 +    switch (rootType) {
  1.1368 +      case VMFunction::RootNone:
  1.1369 +        MOZ_ASSUME_UNREACHABLE("Handle must have root type");
  1.1370 +      case VMFunction::RootObject:
  1.1371 +      case VMFunction::RootString:
  1.1372 +      case VMFunction::RootPropertyName:
  1.1373 +      case VMFunction::RootFunction:
  1.1374 +      case VMFunction::RootCell:
  1.1375 +        Pop(cellReg);
  1.1376 +        break;
  1.1377 +      case VMFunction::RootValue:
  1.1378 +        Pop(valueReg);
  1.1379 +        break;
  1.1380 +    }
  1.1381 +}
  1.1382 +
  1.1383 +bool
  1.1384 +MacroAssembler::convertConstantOrRegisterToFloatingPoint(JSContext *cx, ConstantOrRegister src,
  1.1385 +                                                         FloatRegister output, Label *fail,
  1.1386 +                                                         MIRType outputType)
  1.1387 +{
  1.1388 +    if (src.constant())
  1.1389 +        return convertValueToFloatingPoint(cx, src.value(), output, fail, outputType);
  1.1390 +
  1.1391 +    convertTypedOrValueToFloatingPoint(src.reg(), output, fail, outputType);
  1.1392 +    return true;
  1.1393 +}
  1.1394 +
  1.1395 +void
  1.1396 +MacroAssembler::convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
  1.1397 +                                                   Label *fail, MIRType outputType)
  1.1398 +{
  1.1399 +    JS_ASSERT(IsFloatingPointType(outputType));
  1.1400 +
  1.1401 +    if (src.hasValue()) {
  1.1402 +        convertValueToFloatingPoint(src.valueReg(), output, fail, outputType);
  1.1403 +        return;
  1.1404 +    }
  1.1405 +
  1.1406 +    bool outputIsDouble = outputType == MIRType_Double;
  1.1407 +    switch (src.type()) {
  1.1408 +      case MIRType_Null:
  1.1409 +        loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
  1.1410 +        break;
  1.1411 +      case MIRType_Boolean:
  1.1412 +      case MIRType_Int32:
  1.1413 +        convertInt32ToFloatingPoint(src.typedReg().gpr(), output, outputType);
  1.1414 +        break;
  1.1415 +      case MIRType_Float32:
  1.1416 +        if (outputIsDouble) {
  1.1417 +            convertFloat32ToDouble(src.typedReg().fpu(), output);
  1.1418 +        } else {
  1.1419 +            if (src.typedReg().fpu() != output)
  1.1420 +                moveFloat32(src.typedReg().fpu(), output);
  1.1421 +        }
  1.1422 +        break;
  1.1423 +      case MIRType_Double:
  1.1424 +        if (outputIsDouble) {
  1.1425 +            if (src.typedReg().fpu() != output)
  1.1426 +                moveDouble(src.typedReg().fpu(), output);
  1.1427 +        } else {
  1.1428 +            convertDoubleToFloat32(src.typedReg().fpu(), output);
  1.1429 +        }
  1.1430 +        break;
  1.1431 +      case MIRType_Object:
  1.1432 +      case MIRType_String:
  1.1433 +        jump(fail);
  1.1434 +        break;
  1.1435 +      case MIRType_Undefined:
  1.1436 +        loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
  1.1437 +        break;
  1.1438 +      default:
  1.1439 +        MOZ_ASSUME_UNREACHABLE("Bad MIRType");
  1.1440 +    }
  1.1441 +}
  1.1442 +
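         +// Dispatch on the requested conversion behavior: Normal/NegativeZeroCheck
         +// require an exact int32 (optionally rejecting -0) and branch to |fail|
         +// otherwise; Truncate takes the fast truncation path and bails out to
         +// |truncateFail| (or |fail|) when that path cannot cope; ClampToUint8
         +// clamps via |temp| so the input register is left intact.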
  1.1443 +void
  1.1444 +MacroAssembler::convertDoubleToInt(FloatRegister src, Register output, FloatRegister temp,
  1.1445 +                                   Label *truncateFail, Label *fail,
  1.1446 +                                   IntConversionBehavior behavior)
  1.1447 +{
  1.1448 +    switch (behavior) {
  1.1449 +      case IntConversion_Normal:
  1.1450 +      case IntConversion_NegativeZeroCheck:
  1.1451 +        convertDoubleToInt32(src, output, fail, behavior == IntConversion_NegativeZeroCheck);
  1.1452 +        break;
  1.1453 +      case IntConversion_Truncate:
  1.1454 +        branchTruncateDouble(src, output, truncateFail ? truncateFail : fail);
  1.1455 +        break;
  1.1456 +      case IntConversion_ClampToUint8:
  1.1457 +        // Clamping clobbers the input register, so use a temp.
  1.1458 +        moveDouble(src, temp);
  1.1459 +        clampDoubleToUint8(temp, output);
  1.1460 +        break;
  1.1461 +    }
  1.1462 +}
  1.1463 +
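         +// Convert a boxed Value to an int32 in |output|. Int32, boolean and double
         +// inputs are handled inline, as are null and (when truncating) undefined,
         +// which produce 0. When truncating, strings are unboxed into |stringReg|
         +// and diverted to the caller-supplied |handleStringEntry| slow path, which
         +// is expected to rejoin at |handleStringRejoin| with the string's numeric
         +// value in |temp|. Anything the requested behavior cannot handle branches
         +// to |fail|.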
  1.1464 +void
  1.1465 +MacroAssembler::convertValueToInt(ValueOperand value, MDefinition *maybeInput,
  1.1466 +                                  Label *handleStringEntry, Label *handleStringRejoin,
  1.1467 +                                  Label *truncateDoubleSlow,
  1.1468 +                                  Register stringReg, FloatRegister temp, Register output,
  1.1469 +                                  Label *fail, IntConversionBehavior behavior,
  1.1470 +                                  IntConversionInputKind conversion)
  1.1471 +{
  1.1472 +    Register tag = splitTagForTest(value);
  1.1473 +    bool handleStrings = (behavior == IntConversion_Truncate ||
  1.1474 +                          behavior == IntConversion_ClampToUint8) &&
  1.1475 +                         handleStringEntry &&
  1.1476 +                         handleStringRejoin;
  1.1477 +
  1.1478 +    JS_ASSERT_IF(handleStrings, conversion == IntConversion_Any);
  1.1479 +
  1.1480 +    Label done, isInt32, isBool, isDouble, isNull, isString;
  1.1481 +
  1.1482 +    branchEqualTypeIfNeeded(MIRType_Int32, maybeInput, tag, &isInt32);
  1.1483 +    if (conversion == IntConversion_Any || conversion == IntConversion_NumbersOrBoolsOnly)
  1.1484 +        branchEqualTypeIfNeeded(MIRType_Boolean, maybeInput, tag, &isBool);
  1.1485 +    branchEqualTypeIfNeeded(MIRType_Double, maybeInput, tag, &isDouble);
  1.1486 +
  1.1487 +    if (conversion == IntConversion_Any) {
  1.1488 +        // If we are not truncating, we fail for anything that's not
  1.1489 +        // null. Otherwise we might be able to handle strings and objects.
  1.1490 +        switch (behavior) {
  1.1491 +          case IntConversion_Normal:
  1.1492 +          case IntConversion_NegativeZeroCheck:
  1.1493 +            branchTestNull(Assembler::NotEqual, tag, fail);
  1.1494 +            break;
  1.1495 +
  1.1496 +          case IntConversion_Truncate:
  1.1497 +          case IntConversion_ClampToUint8:
  1.1498 +            branchEqualTypeIfNeeded(MIRType_Null, maybeInput, tag, &isNull);
  1.1499 +            if (handleStrings)
  1.1500 +                branchEqualTypeIfNeeded(MIRType_String, maybeInput, tag, &isString);
  1.1501 +            branchEqualTypeIfNeeded(MIRType_Object, maybeInput, tag, fail);
  1.1502 +            branchTestUndefined(Assembler::NotEqual, tag, fail);
  1.1503 +            break;
  1.1504 +        }
  1.1505 +    } else {
  1.1506 +        jump(fail);
  1.1507 +    }
  1.1508 +
  1.1509 +    // The value is null or undefined in truncation contexts - just emit 0.
  1.1510 +    if (isNull.used())
  1.1511 +        bind(&isNull);
  1.1512 +    mov(ImmWord(0), output);
  1.1513 +    jump(&done);
  1.1514 +
  1.1515 +    // Try converting a string into a double, then jump to the double case.
  1.1516 +    if (handleStrings) {
  1.1517 +        bind(&isString);
  1.1518 +        unboxString(value, stringReg);
  1.1519 +        jump(handleStringEntry);
  1.1520 +    }
  1.1521 +
  1.1522 +    // Try converting the double into an integer.
  1.1523 +    if (isDouble.used() || handleStrings) {
  1.1524 +        if (isDouble.used()) {
  1.1525 +            bind(&isDouble);
  1.1526 +            unboxDouble(value, temp);
  1.1527 +        }
  1.1528 +
  1.1529 +        if (handleStrings)
  1.1530 +            bind(handleStringRejoin);
  1.1531 +
  1.1532 +        convertDoubleToInt(temp, output, temp, truncateDoubleSlow, fail, behavior);
  1.1533 +        jump(&done);
  1.1534 +    }
  1.1535 +
  1.1536 +    // Just unbox a bool; the result is 0 or 1.
  1.1537 +    if (isBool.used()) {
  1.1538 +        bind(&isBool);
  1.1539 +        unboxBoolean(value, output);
  1.1540 +        jump(&done);
  1.1541 +    }
  1.1542 +
  1.1543 +    // Integers can be unboxed.
  1.1544 +    if (isInt32.used()) {
  1.1545 +        bind(&isInt32);
  1.1546 +        unboxInt32(value, output);
  1.1547 +        if (behavior == IntConversion_ClampToUint8)
  1.1548 +            clampIntToUint8(output);
  1.1549 +    }
  1.1550 +
  1.1551 +    bind(&done);
  1.1552 +}
  1.1553 +
  1.1554 +bool
  1.1555 +MacroAssembler::convertValueToInt(JSContext *cx, const Value &v, Register output, Label *fail,
  1.1556 +                                  IntConversionBehavior behavior)
  1.1557 +{
  1.1558 +    bool handleStrings = (behavior == IntConversion_Truncate ||
  1.1559 +                          behavior == IntConversion_ClampToUint8);
  1.1560 +
  1.1561 +    if (v.isNumber() || (handleStrings && v.isString())) {
  1.1562 +        double d;
  1.1563 +        if (v.isNumber())
  1.1564 +            d = v.toNumber();
  1.1565 +        else if (!StringToNumber(cx, v.toString(), &d))
  1.1566 +            return false;
  1.1567 +
  1.1568 +        switch (behavior) {
  1.1569 +          case IntConversion_Normal:
  1.1570 +          case IntConversion_NegativeZeroCheck: {
  1.1571 +            // -0 is checked anyway if we have a constant value.
  1.1572 +            int i;
  1.1573 +            if (mozilla::NumberIsInt32(d, &i))
  1.1574 +                move32(Imm32(i), output);
  1.1575 +            else
  1.1576 +                jump(fail);
  1.1577 +            break;
  1.1578 +          }
  1.1579 +          case IntConversion_Truncate:
  1.1580 +            move32(Imm32(js::ToInt32(d)), output);
  1.1581 +            break;
  1.1582 +          case IntConversion_ClampToUint8:
  1.1583 +            move32(Imm32(ClampDoubleToUint8(d)), output);
  1.1584 +            break;
  1.1585 +        }
  1.1586 +
  1.1587 +        return true;
  1.1588 +    }
  1.1589 +
  1.1590 +    if (v.isBoolean()) {
  1.1591 +        move32(Imm32(v.toBoolean() ? 1 : 0), output);
  1.1592 +        return true;
  1.1593 +    }
  1.1594 +
  1.1595 +    if (v.isNull() || v.isUndefined()) {
  1.1596 +        move32(Imm32(0), output);
  1.1597 +        return true;
  1.1598 +    }
  1.1599 +
  1.1600 +    JS_ASSERT(v.isObject());
  1.1601 +
  1.1602 +    jump(fail);
  1.1603 +    return true;
  1.1604 +}
  1.1605 +
  1.1606 +bool
  1.1607 +MacroAssembler::convertConstantOrRegisterToInt(JSContext *cx, ConstantOrRegister src,
  1.1608 +                                               FloatRegister temp, Register output,
  1.1609 +                                               Label *fail, IntConversionBehavior behavior)
  1.1610 +{
  1.1611 +    if (src.constant())
  1.1612 +        return convertValueToInt(cx, src.value(), output, fail, behavior);
  1.1613 +
  1.1614 +    convertTypedOrValueToInt(src.reg(), temp, output, fail, behavior);
  1.1615 +    return true;
  1.1616 +}
  1.1617 +
  1.1618 +void
  1.1619 +MacroAssembler::convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister temp,
  1.1620 +                                         Register output, Label *fail,
  1.1621 +                                         IntConversionBehavior behavior)
  1.1622 +{
  1.1623 +    if (src.hasValue()) {
  1.1624 +        convertValueToInt(src.valueReg(), temp, output, fail, behavior);
  1.1625 +        return;
  1.1626 +    }
  1.1627 +
  1.1628 +    switch (src.type()) {
  1.1629 +      case MIRType_Undefined:
  1.1630 +      case MIRType_Null:
  1.1631 +        move32(Imm32(0), output);
  1.1632 +        break;
  1.1633 +      case MIRType_Boolean:
  1.1634 +      case MIRType_Int32:
  1.1635 +        if (src.typedReg().gpr() != output)
  1.1636 +            move32(src.typedReg().gpr(), output);
  1.1637 +        if (src.type() == MIRType_Int32 && behavior == IntConversion_ClampToUint8)
  1.1638 +            clampIntToUint8(output);
  1.1639 +        break;
  1.1640 +      case MIRType_Double:
  1.1641 +        convertDoubleToInt(src.typedReg().fpu(), output, temp, nullptr, fail, behavior);
  1.1642 +        break;
  1.1643 +      case MIRType_Float32:
  1.1644 +        // Conversion to Double simplifies implementation at the expense of performance.
  1.1645 +        convertFloat32ToDouble(src.typedReg().fpu(), temp);
  1.1646 +        convertDoubleToInt(temp, output, temp, nullptr, fail, behavior);
  1.1647 +        break;
  1.1648 +      case MIRType_String:
  1.1649 +      case MIRType_Object:
  1.1650 +        jump(fail);
  1.1651 +        break;
  1.1652 +      default:
  1.1653 +        MOZ_ASSUME_UNREACHABLE("Bad MIRType");
  1.1654 +    }
  1.1655 +}
  1.1656 +
  1.1657 +void
  1.1658 +MacroAssembler::finish()
  1.1659 +{
  1.1660 +    if (sequentialFailureLabel_.used()) {
  1.1661 +        bind(&sequentialFailureLabel_);
  1.1662 +        handleFailure(SequentialExecution);
  1.1663 +    }
  1.1664 +    if (parallelFailureLabel_.used()) {
  1.1665 +        bind(&parallelFailureLabel_);
  1.1666 +        handleFailure(ParallelExecution);
  1.1667 +    }
  1.1668 +
  1.1669 +    MacroAssemblerSpecific::finish();
  1.1670 +}
  1.1671 +
  1.1672 +void
  1.1673 +MacroAssembler::branchIfNotInterpretedConstructor(Register fun, Register scratch, Label *label)
  1.1674 +{
  1.1675 +    // 16-bit loads are slow and unaligned 32-bit loads may be too, so
  1.1676 +    // perform an aligned 32-bit load and adjust the bitmask accordingly.
  1.1677 +    JS_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
  1.1678 +    JS_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);
  1.1679 +    JS_STATIC_ASSERT(IS_LITTLE_ENDIAN);
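         +    // nargs and flags are adjacent 16-bit fields, so on a little-endian
         +    // target a single 32-bit load at offsetOfNargs() yields nargs in the
         +    // low half and the flags in the high half; every flag mask below is
         +    // therefore shifted left by 16.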
  1.1680 +
  1.1681 +    // Emit code for the following test:
  1.1682 +    //
  1.1683 +    // bool isInterpretedConstructor() const {
  1.1684 +    //     return isInterpreted() && !isFunctionPrototype() && !isArrow() &&
  1.1685 +    //         (!isSelfHostedBuiltin() || isSelfHostedConstructor());
  1.1686 +    // }
  1.1687 +
  1.1688 +    // First, ensure it's a scripted function.
  1.1689 +    load32(Address(fun, JSFunction::offsetOfNargs()), scratch);
  1.1690 +    branchTest32(Assembler::Zero, scratch, Imm32(JSFunction::INTERPRETED << 16), label);
  1.1691 +
  1.1692 +    // Common case: if IS_FUN_PROTO, ARROW and SELF_HOSTED are not set,
  1.1693 +    // the function is an interpreted constructor and we're done.
  1.1694 +    Label done;
  1.1695 +    uint32_t bits = (JSFunction::IS_FUN_PROTO | JSFunction::ARROW | JSFunction::SELF_HOSTED) << 16;
  1.1696 +    branchTest32(Assembler::Zero, scratch, Imm32(bits), &done);
  1.1697 +    {
  1.1698 +        // The callee is either Function.prototype, an arrow function or
  1.1699 +        // self-hosted. None of these are constructible, except self-hosted
  1.1700 +        // constructors, so branch to |label| if SELF_HOSTED_CTOR is not set.
  1.1701 +        branchTest32(Assembler::Zero, scratch, Imm32(JSFunction::SELF_HOSTED_CTOR << 16), label);
  1.1702 +
  1.1703 +#ifdef DEBUG
  1.1704 +        // Function.prototype should not have the SELF_HOSTED_CTOR flag.
  1.1705 +        branchTest32(Assembler::Zero, scratch, Imm32(JSFunction::IS_FUN_PROTO << 16), &done);
  1.1706 +        breakpoint();
  1.1707 +#endif
  1.1708 +    }
  1.1709 +    bind(&done);
  1.1710 +}
  1.1711 +
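         +// Emit a type-tag test against |type| only when it could actually pass:
         +// if |maybeDef| is provided and its MIR type information rules out |type|,
         +// the test is statically dead and no code is emitted.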
  1.1712 +void
  1.1713 +MacroAssembler::branchEqualTypeIfNeeded(MIRType type, MDefinition *maybeDef, Register tag,
  1.1714 +                                        Label *label)
  1.1715 +{
  1.1716 +    if (!maybeDef || maybeDef->mightBeType(type)) {
  1.1717 +        switch (type) {
  1.1718 +          case MIRType_Null:
  1.1719 +            branchTestNull(Equal, tag, label);
  1.1720 +            break;
  1.1721 +          case MIRType_Boolean:
  1.1722 +            branchTestBoolean(Equal, tag, label);
  1.1723 +            break;
  1.1724 +          case MIRType_Int32:
  1.1725 +            branchTestInt32(Equal, tag, label);
  1.1726 +            break;
  1.1727 +          case MIRType_Double:
  1.1728 +            branchTestDouble(Equal, tag, label);
  1.1729 +            break;
  1.1730 +          case MIRType_String:
  1.1731 +            branchTestString(Equal, tag, label);
  1.1732 +            break;
  1.1733 +          case MIRType_Object:
  1.1734 +            branchTestObject(Equal, tag, label);
  1.1735 +            break;
  1.1736 +          default:
  1.1737 +            MOZ_ASSUME_UNREACHABLE("Unsupported type");
  1.1738 +        }
  1.1739 +    }
  1.1740 +}
  1.1741 +
  1.1742 +
  1.1743 +// If a pseudostack frame has this as its label, its stack pointer
  1.1744 +// field points to the registers saved on entry to JIT code.  A native
  1.1745 +// stack unwinder could use that information to continue unwinding
  1.1746 +// past that point.
  1.1747 +const char MacroAssembler::enterJitLabel[] = "EnterJIT";
  1.1748 +
  1.1749 +// Creates an enterJIT pseudostack frame, as described above.  Pushes
  1.1750 +// a word to the stack to indicate whether this was done.  |framePtr| is
  1.1751 +// the pointer to the machine-dependent saved state.
  1.1752 +void
  1.1753 +MacroAssembler::spsMarkJit(SPSProfiler *p, Register framePtr, Register temp)
  1.1754 +{
  1.1755 +    Label spsNotEnabled;
  1.1756 +    uint32_t *enabledAddr = p->addressOfEnabled();
  1.1757 +    load32(AbsoluteAddress(enabledAddr), temp);
  1.1758 +    push(temp); // +4: Did we push an SPS frame.
  1.1759 +    branchTest32(Assembler::Equal, temp, temp, &spsNotEnabled);
  1.1760 +
  1.1761 +    Label stackFull;
  1.1762 +    // We always need the "safe" versions, because these are used in trampolines
  1.1763 +    // and won't be regenerated when SPS state changes.
  1.1764 +    spsProfileEntryAddressSafe(p, 0, temp, &stackFull);
  1.1765 +
  1.1766 +    storePtr(ImmPtr(enterJitLabel), Address(temp, ProfileEntry::offsetOfString()));
  1.1767 +    storePtr(framePtr,              Address(temp, ProfileEntry::offsetOfStackAddress()));
  1.1768 +    storePtr(ImmWord(uintptr_t(0)), Address(temp, ProfileEntry::offsetOfScript()));
  1.1769 +    store32(Imm32(ProfileEntry::NullPCIndex), Address(temp, ProfileEntry::offsetOfPCIdx()));
  1.1770 +
  1.1771 +    /* Always increment the stack size, whether or not we actually pushed. */
  1.1772 +    bind(&stackFull);
  1.1773 +    loadPtr(AbsoluteAddress(p->addressOfSizePointer()), temp);
  1.1774 +    add32(Imm32(1), Address(temp, 0));
  1.1775 +
  1.1776 +    bind(&spsNotEnabled);
  1.1777 +}
  1.1778 +
  1.1779 +// Pops the word pushed by spsMarkJit and, if spsMarkJit pushed an SPS
  1.1780 +// frame, pops it.
  1.1781 +void
  1.1782 +MacroAssembler::spsUnmarkJit(SPSProfiler *p, Register temp)
  1.1783 +{
  1.1784 +    Label spsNotEnabled;
  1.1785 +    pop(temp); // -4: Was the profiler enabled.
  1.1786 +    branchTest32(Assembler::Equal, temp, temp, &spsNotEnabled);
  1.1787 +
  1.1788 +    spsPopFrameSafe(p, temp);
  1.1789 +
  1.1790 +    bind(&spsNotEnabled);
  1.1791 +}
